Diffstat (limited to 'drivers/net')
975 files changed, 38252 insertions, 34595 deletions
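The Kconfig and Makefile hunks that open this diff wire in the new MHI network driver (CONFIG_MHI_NET, for QCOM WWAN modems such as the SDX55) and retire the WiMAX stack. For orientation, a minimal sketch of the shape an MHI client driver takes, built on struct mhi_driver from <linux/mhi.h>; this is not the mhi_net code itself, and the channel name and every identifier below are illustrative assumptions:

#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/netdevice.h>

static int example_mhi_probe(struct mhi_device *mhi_dev,
			     const struct mhi_device_id *id)
{
	/* allocate a netdev, set netdev_ops, register_netdev(), ... */
	return 0;
}

static void example_mhi_remove(struct mhi_device *mhi_dev)
{
	/* unregister_netdev() and release resources */
}

static void example_mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
				   struct mhi_result *res)
{
	/* downlink completion: res->buf_addr holds data from the modem */
}

static void example_mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
				   struct mhi_result *res)
{
	/* uplink completion: free or recycle the transmitted buffer */
}

static const struct mhi_device_id example_mhi_match[] = {
	{ .chan = "IP_HW0" },	/* assumed data-plane channel name */
	{}
};
MODULE_DEVICE_TABLE(mhi, example_mhi_match);

static struct mhi_driver example_mhi_driver = {
	.probe		= example_mhi_probe,
	.remove		= example_mhi_remove,
	.dl_xfer_cb	= example_mhi_dl_xfer_cb,
	.ul_xfer_cb	= example_mhi_ul_xfer_cb,
	.id_table	= example_mhi_match,
	.driver = {
		.name = "example_mhi_net",
	},
};
module_mhi_driver(example_mhi_driver);

MODULE_LICENSE("GPL v2");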
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c3dbe64e628e..260f9f46668b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -287,7 +287,7 @@ config GTP 3GPP TS 29.060 standards. To compile this drivers as a module, choose M here: the module - wil be called gtp. + will be called gtp. config MACSEC tristate "IEEE 802.1AE MAC-level encryption (MACsec)" @@ -426,6 +426,13 @@ config VSOCKMON mostly intended for developers or support to debug vsock issues. If unsure, say N. +config MHI_NET + tristate "MHI network driver" + depends on MHI_BUS + help + This is the network driver for MHI bus. It can be used with + QCOM based WWAN modems (like SDX55). Say Y or M. + endif # NET_CORE config SUNGEM_PHY @@ -489,8 +496,6 @@ source "drivers/net/usb/Kconfig" source "drivers/net/wireless/Kconfig" -source "drivers/net/wimax/Kconfig" - source "drivers/net/wan/Kconfig" source "drivers/net/ieee802154/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 72e18d505d1a..36e2e41ed2aa 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_GTP) += gtp.o obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o obj-$(CONFIG_VSOCKMON) += vsockmon.o +obj-$(CONFIG_MHI_NET) += mhi_net.o # # Networking Drivers @@ -66,7 +67,6 @@ obj-$(CONFIG_NET_SB1000) += sb1000.o obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ -obj-$(CONFIG_WIMAX) += wimax/ obj-$(CONFIG_IEEE802154) += ieee802154/ obj-$(CONFIG_VMXNET3) += vmxnet3/ diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index ff0bea1554f9..85ebd2b7e446 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -510,7 +510,7 @@ static const struct net_device_ops bareudp_netdev_ops = { .ndo_open = bareudp_open, .ndo_stop = bareudp_stop, .ndo_start_xmit = bareudp_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_fill_metadata_dst = bareudp_fill_metadata_dst, }; @@ -522,7 +522,7 @@ static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = { }; /* Info for udev, that this is a virtual tunnel endpoint */ -static struct device_type bareudp_type = { +static const struct device_type bareudp_type = { .name = "bareudp", }; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 47afc5938c26..5fe5232cc3f3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1228,14 +1228,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev, } #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ NETIF_F_HIGHDMA | NETIF_F_LRO) #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_RXCSUM | NETIF_F_ALL_TSO) + NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_ALL_TSO) + NETIF_F_GSO_SOFTWARE) static void bond_compute_features(struct bonding *bond) @@ -1291,8 +1291,7 @@ done: bond_dev->vlan_features = vlan_features; bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX | - NETIF_F_GSO_UDP_L4; + NETIF_F_HW_VLAN_STAG_TX; #ifdef CONFIG_XFRM_OFFLOAD bond_dev->hw_enc_features |= xfrm_features; #endif /* CONFIG_XFRM_OFFLOAD */ @@ -4746,16 +4745,14 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | 
NETIF_F_GSO_UDP_L4; -#ifdef CONFIG_XFRM_OFFLOAD - bond_dev->hw_features |= BOND_XFRM_FEATURES; -#endif /* CONFIG_XFRM_OFFLOAD */ + bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; #ifdef CONFIG_XFRM_OFFLOAD - /* Disable XFRM features if this isn't an active-backup config */ - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - bond_dev->features &= ~BOND_XFRM_FEATURES; + bond_dev->hw_features |= BOND_XFRM_FEATURES; + /* Only enable XFRM features if this is an active-backup config */ + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) + bond_dev->features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ } diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index fd5c9cbe45b1..56d34be5e797 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/proc_fs.h> +#include <linux/ethtool.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/netns/generic.h> diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index c14de95d2ca7..5284f0ab3b06 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -468,7 +468,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) } reg_mid = at91_can_id_to_reg_mid(cf->can_id); reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) | - (cf->can_dlc << 16) | AT91_MCR_MTCR; + (cf->len << 16) | AT91_MCR_MTCR; /* disable MB while writing ID (see datasheet) */ set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED); @@ -481,7 +481,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) /* This triggers transmission */ at91_write(priv, AT91_MCR(mb), reg_mcr); - stats->tx_bytes += cf->can_dlc; + stats->tx_bytes += cf->len; /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! 
*/ can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv)); @@ -554,7 +554,7 @@ static void at91_rx_overflow_err(struct net_device *dev) cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); } @@ -580,7 +580,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb, cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK; reg_msr = at91_read(priv, AT91_MSR(mb)); - cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf); + cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf); if (reg_msr & AT91_MSR_MRTR) cf->can_id |= CAN_RTR_FLAG; @@ -619,7 +619,7 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) at91_read_mb(dev, mb, cf); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -780,7 +780,7 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) at91_poll_err_frame(dev, cf, reg_sr); dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; + dev->stats.rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -1047,7 +1047,7 @@ static void at91_irq_err(struct net_device *dev) at91_irq_err_state(dev, cf, new_state); dev->stats.rx_packets++; - dev->stats.rx_bytes += cf->can_dlc; + dev->stats.rx_bytes += cf->len; netif_rx(skb); priv->can.state = new_state; diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 1a9e9b9a4bf6..63f48b016ecd 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -306,7 +306,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, struct can_frame *frame, int idx) { struct c_can_priv *priv = netdev_priv(dev); - u16 ctrl = IF_MCONT_TX | frame->can_dlc; + u16 ctrl = IF_MCONT_TX | frame->len; bool rtr = frame->can_id & CAN_RTR_FLAG; u32 arb = IF_ARB_MSGVAL; int i; @@ -339,7 +339,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, if (priv->type == BOSCH_D_CAN) { u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface); - for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { + for (i = 0; i < frame->len; i += 4, dreg += 2) { data = (u32)frame->data[i]; data |= (u32)frame->data[i + 1] << 8; data |= (u32)frame->data[i + 2] << 16; @@ -347,7 +347,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface, priv->write_reg32(priv, dreg, data); } } else { - for (i = 0; i < frame->can_dlc; i += 2) { + for (i = 0; i < frame->len; i += 2) { priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, frame->data[i] | @@ -397,7 +397,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) return -ENOMEM; } - frame->can_dlc = get_can_dlc(ctrl & 0x0F); + frame->len = can_cc_dlc2len(ctrl & 0x0F); arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface)); @@ -412,7 +412,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) int i, dreg = C_CAN_IFACE(DATA1_REG, iface); if (priv->type == BOSCH_D_CAN) { - for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) { + for (i = 0; i < frame->len; i += 4, dreg += 2) { data = priv->read_reg32(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; @@ -420,7 +420,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) frame->data[i + 3] = data >> 24; } } else { - for (i = 0; i < frame->can_dlc; i += 2, dreg++) { + for (i = 0; i < frame->len; i += 2, dreg++) { data = priv->read_reg(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data 
>> 8; @@ -429,7 +429,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) } stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; + stats->rx_bytes += frame->len; netif_receive_skb(skb); return 0; @@ -475,7 +475,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, * transmit as we might race against do_tx(). */ c_can_setup_tx_object(dev, IF_TX, frame, idx); - priv->dlc[idx] = frame->can_dlc; + priv->dlc[idx] = frame->len; can_put_echo_skb(skb, dev, idx); /* Update the active bits */ @@ -977,7 +977,7 @@ static int c_can_handle_state_change(struct net_device *dev, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -1047,7 +1047,7 @@ static int c_can_handle_bus_err(struct net_device *dev, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 07e2b8df5153..8d9f332c35e0 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,7 +390,7 @@ static void cc770_tx(struct net_device *dev, int mo) u32 id; int i; - dlc = cf->can_dlc; + dlc = cf->len; id = cf->can_id; rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; @@ -470,7 +470,7 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) cf->can_id = CAN_RTR_FLAG; if (config & MSGCFG_XTD) cf->can_id |= CAN_EFF_FLAG; - cf->can_dlc = 0; + cf->len = 0; } else { if (config & MSGCFG_XTD) { id = cc770_read_reg(priv, msgobj[mo].id[3]); @@ -486,13 +486,13 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) } cf->can_id = id; - cf->can_dlc = get_can_dlc((config & 0xf0) >> 4); - for (i = 0; i < cf->can_dlc; i++) + cf->len = can_cc_dlc2len((config & 0xf0) >> 4); + for (i = 0; i < cf->len; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -572,7 +572,7 @@ static int cc770_err(struct net_device *dev, u8 status) stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -699,7 +699,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) } cf = (struct can_frame *)priv->tx_skb->data; - stats->tx_bytes += cf->can_dlc; + stats->tx_bytes += cf->len; stats->tx_packets++; can_put_echo_skb(priv->tx_skb, dev, 0); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 81e39d7507d8..3486704c8a95 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -30,12 +30,12 @@ MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64}; -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc) +/* get data length from raw data length code (DLC) */ +u8 can_fd_dlc2len(u8 dlc) { - return dlc2len[can_dlc & 0x0F]; + return dlc2len[dlc & 0x0F]; } -EXPORT_SYMBOL_GPL(can_dlc2len); +EXPORT_SYMBOL_GPL(can_fd_dlc2len); static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ 9, 9, 9, 9, /* 9 - 12 */ @@ -49,14 +49,14 @@ static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */ /* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len) +u8 can_fd_len2dlc(u8 len) { if (unlikely(len > 64)) return 0xF; return len2dlc[len]; } 
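The can/dev.c hunk above renames the DLC helpers so the Classical CAN and CAN FD semantics are explicit: can_cc_dlc2len() caps the payload at 8 bytes, while can_fd_dlc2len() and can_fd_len2dlc() translate through the 0..64 byte FD table shown above; in step with this, struct can_frame's can_dlc field gives way to len across the drivers in this diff. A short sketch, assuming the renamed helpers from <linux/can/dev.h>, of how they pair up on the RX and TX paths (the two wrapper functions are illustrative):

#include <linux/can.h>
#include <linux/can/dev.h>

static void example_rx_set_len(struct canfd_frame *cfd, u8 dlc, bool fd_frame)
{
	if (fd_frame)
		cfd->len = can_fd_dlc2len(dlc);	/* DLC 0..15 -> 0..64 bytes */
	else
		cfd->len = can_cc_dlc2len(dlc);	/* DLC 0..15 -> 0..8 bytes */
}

static u8 example_tx_get_dlc(const struct canfd_frame *cfd)
{
	return can_fd_len2dlc(cfd->len);	/* 0..64 bytes -> DLC 0..15 */
}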
-EXPORT_SYMBOL_GPL(can_len2dlc); +EXPORT_SYMBOL_GPL(can_fd_len2dlc); #ifdef CONFIG_CAN_CALC_BITTIMING #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ @@ -595,7 +595,7 @@ static void can_restart(struct net_device *dev) netif_rx_ni(skb); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; restart: netdev_dbg(dev, "restarted\n"); @@ -737,7 +737,7 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf) return NULL; (*cf)->can_id = CAN_ERR_FLAG; - (*cf)->can_dlc = CAN_ERR_DLC; + (*cf)->len = CAN_ERR_DLC; return skb; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 99e5f272205d..038fe1036df2 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -236,8 +236,8 @@ #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* default to BE register access */ #define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) -/* Setup stop mode to support wakeup */ -#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) +/* Setup stop mode with GPR to support wakeup */ +#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8) /* Support CAN-FD mode */ #define FLEXCAN_QUIRK_SUPPORT_FD BIT(9) /* support memory detection and correction */ @@ -381,7 +381,7 @@ static const struct flexcan_devtype_data fsl_imx28_devtype_data = { static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_SETUP_STOP_MODE, + FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR, }; static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { @@ -393,7 +393,7 @@ static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | - FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE | + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC, }; @@ -746,7 +746,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u32 can_id; u32 data; - u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_len2dlc(cfd->len)) << 16); + u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16); int i; if (can_dropped_invalid_skb(dev, skb)) @@ -1000,12 +1000,12 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK; if (reg_ctrl & FLEXCAN_MB_CNT_EDL) { - cfd->len = can_dlc2len(get_canfd_dlc((reg_ctrl >> 16) & 0xf)); + cfd->len = can_fd_dlc2len((reg_ctrl >> 16) & 0xf); if (reg_ctrl & FLEXCAN_MB_CNT_BRS) cfd->flags |= CANFD_BRS; } else { - cfd->len = get_can_dlc((reg_ctrl >> 16) & 0xf); + cfd->len = can_cc_dlc2len((reg_ctrl >> 16) & 0xf); if (reg_ctrl & FLEXCAN_MB_CNT_RTR) cfd->can_id |= CAN_RTR_FLAG; @@ -1346,6 +1346,72 @@ static void flexcan_ram_init(struct net_device *dev) priv->write(reg_ctrl2, ®s->ctrl2); } +static int flexcan_rx_offload_setup(struct net_device *dev) +{ + struct flexcan_priv *priv = netdev_priv(dev); + int err; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN; + else + priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; + priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + + (sizeof(priv->regs->mb[1]) / 
priv->mb_size); + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) + priv->tx_mb_reserved = + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP); + else + priv->tx_mb_reserved = + flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO); + priv->tx_mb_idx = priv->mb_count - 1; + priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); + priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); + + priv->offload.mailbox_read = flexcan_mailbox_read; + + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { + priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; + priv->offload.mb_last = priv->mb_count - 2; + + priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, + priv->offload.mb_first); + err = can_rx_offload_add_timestamp(dev, &priv->offload); + } else { + priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + err = can_rx_offload_add_fifo(dev, &priv->offload, + FLEXCAN_NAPI_WEIGHT); + } + + return err; +} + +static void flexcan_chip_interrupts_enable(const struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + u64 reg_imask; + + disable_irq(dev->irq); + priv->write(priv->reg_ctrl_default, ®s->ctrl); + reg_imask = priv->rx_mask | priv->tx_mask; + priv->write(upper_32_bits(reg_imask), ®s->imask2); + priv->write(lower_32_bits(reg_imask), ®s->imask1); + enable_irq(dev->irq); +} + +static void flexcan_chip_interrupts_disable(const struct net_device *dev) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + struct flexcan_regs __iomem *regs = priv->regs; + + priv->write(0, ®s->imask2); + priv->write(0, ®s->imask1); + priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); +} + /* flexcan_chip_start * * this functions is entered with clocks enabled @@ -1356,7 +1422,6 @@ static int flexcan_chip_start(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr; - u64 reg_imask; int err, i; struct flexcan_mb __iomem *mb; @@ -1574,14 +1639,6 @@ static int flexcan_chip_start(struct net_device *dev) priv->can.state = CAN_STATE_ERROR_ACTIVE; - /* enable interrupts atomically */ - disable_irq(dev->irq); - priv->write(priv->reg_ctrl_default, ®s->ctrl); - reg_imask = priv->rx_mask | priv->tx_mask; - priv->write(upper_32_bits(reg_imask), ®s->imask2); - priv->write(lower_32_bits(reg_imask), ®s->imask1); - enable_irq(dev->irq); - /* print chip status */ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__, priv->read(®s->mcr), priv->read(®s->ctrl)); @@ -1600,7 +1657,6 @@ static int flexcan_chip_start(struct net_device *dev) static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) { struct flexcan_priv *priv = netdev_priv(dev); - struct flexcan_regs __iomem *regs = priv->regs; int err; /* freeze + disable module */ @@ -1611,12 +1667,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) if (err && !disable_on_error) goto out_chip_unfreeze; - /* Disable all interrupts */ - priv->write(0, ®s->imask2); - priv->write(0, ®s->imask1); - priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, - ®s->ctrl); - priv->can.state = CAN_STATE_STOPPED; return 0; @@ -1662,61 +1712,33 @@ static int flexcan_open(struct net_device *dev) if (err) goto out_close; - err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); + err = flexcan_rx_offload_setup(dev); if (err) goto 
out_transceiver_disable; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD) - priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN; - else - priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; - priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + - (sizeof(priv->regs->mb[1]) / priv->mb_size); - - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) - priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP); - else - priv->tx_mb_reserved = - flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO); - priv->tx_mb_idx = priv->mb_count - 1; - priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); - priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); - - priv->offload.mailbox_read = flexcan_mailbox_read; + err = flexcan_chip_start(dev); + if (err) + goto out_can_rx_offload_del; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; - priv->offload.mb_last = priv->mb_count - 2; + can_rx_offload_enable(&priv->offload); - priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, - priv->offload.mb_first); - err = can_rx_offload_add_timestamp(dev, &priv->offload); - } else { - priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | - FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; - err = can_rx_offload_add_fifo(dev, &priv->offload, - FLEXCAN_NAPI_WEIGHT); - } + err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) - goto out_free_irq; + goto out_can_rx_offload_disable; - /* start chip and queuing */ - err = flexcan_chip_start(dev); - if (err) - goto out_offload_del; + flexcan_chip_interrupts_enable(dev); can_led_event(dev, CAN_LED_EVENT_OPEN); - can_rx_offload_enable(&priv->offload); netif_start_queue(dev); return 0; - out_offload_del: + out_can_rx_offload_disable: + can_rx_offload_disable(&priv->offload); + flexcan_chip_stop(dev); + out_can_rx_offload_del: can_rx_offload_del(&priv->offload); - out_free_irq: - free_irq(dev->irq, dev); out_transceiver_disable: flexcan_transceiver_disable(priv); out_close: @@ -1732,14 +1754,15 @@ static int flexcan_close(struct net_device *dev) struct flexcan_priv *priv = netdev_priv(dev); netif_stop_queue(dev); + flexcan_chip_interrupts_disable(dev); + free_irq(dev->irq, dev); can_rx_offload_disable(&priv->offload); flexcan_chip_stop_disable_on_error(dev); can_rx_offload_del(&priv->offload); - free_irq(dev->irq, dev); flexcan_transceiver_disable(priv); - close_candev(dev); + pm_runtime_put(priv->dev); can_led_event(dev, CAN_LED_EVENT_STOP); @@ -1757,6 +1780,8 @@ static int flexcan_set_mode(struct net_device *dev, enum can_mode mode) if (err) return err; + flexcan_chip_interrupts_enable(dev); + netif_wake_queue(dev); break; @@ -1915,15 +1940,8 @@ static const struct of_device_id flexcan_of_match[] = { }; MODULE_DEVICE_TABLE(of, flexcan_of_match); -static const struct platform_device_id flexcan_id_table[] = { - { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(platform, flexcan_id_table); - static int flexcan_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; @@ -1972,15 +1990,7 @@ static int flexcan_probe(struct platform_device *pdev) if (IS_ERR(regs)) return PTR_ERR(regs); - of_id = of_match_device(flexcan_of_match, &pdev->dev); - if (of_id) { - devtype_data = of_id->data; - } else if (platform_get_device_id(pdev)->driver_data) { - devtype_data = 
(struct flexcan_devtype_data *) - platform_get_device_id(pdev)->driver_data; - } else { - return -ENODEV; - } + devtype_data = of_device_get_match_data(&pdev->dev); if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) { @@ -2047,7 +2057,7 @@ static int flexcan_probe(struct platform_device *pdev) of_can_transceiver(dev); devm_can_led_init(dev); - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) { + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { err = flexcan_setup_stop_mode(pdev); if (err) dev_dbg(&pdev->dev, "failed to setup stop-mode\n"); @@ -2095,6 +2105,8 @@ static int __maybe_unused flexcan_suspend(struct device *device) if (err) return err; + flexcan_chip_interrupts_disable(dev); + err = pinctrl_pm_select_sleep_state(device); if (err) return err; @@ -2130,6 +2142,8 @@ static int __maybe_unused flexcan_resume(struct device *device) err = flexcan_chip_start(dev); if (err) return err; + + flexcan_chip_interrupts_enable(dev); } } @@ -2206,7 +2220,6 @@ static struct platform_driver flexcan_driver = { }, .probe = flexcan_probe, .remove = flexcan_remove, - .id_table = flexcan_id_table, }; module_platform_driver(flexcan_driver); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 39802f107eb1..f5d94a692576 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1201,12 +1201,12 @@ static int grcan_receive(struct net_device *dev, int budget) cf->can_id = ((slot[0] & GRCAN_MSG_BID) >> GRCAN_MSG_BID_BIT); } - cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC) + cf->len = can_cc_dlc2len((slot[1] & GRCAN_MSG_DLC) >> GRCAN_MSG_DLC_BIT); if (rtr) { cf->can_id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) { + for (i = 0; i < cf->len; i++) { j = GRCAN_MSG_DATA_SLOT_INDEX(i); shift = GRCAN_MSG_DATA_SHIFT(i); cf->data[i] = (u8)(slot[j] >> shift); @@ -1215,7 +1215,7 @@ static int grcan_receive(struct net_device *dev, int budget) /* Update statistics and read pointer */ stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); @@ -1399,7 +1399,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, eff = cf->can_id & CAN_EFF_FLAG; rtr = cf->can_id & CAN_RTR_FLAG; id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK); - dlc = cf->can_dlc; + dlc = cf->len; if (eff) tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID; else @@ -1447,7 +1447,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, * can_put_echo_skb would be an error unless other measures are * taken. 
*/ - priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */ + priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */ can_put_echo_skb(skb, dev, slotindex); /* Make sure everything is written before allowing hardware to diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 74503cacf594..86b0e1406a21 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -271,9 +271,9 @@ static void ifi_canfd_read_fifo(struct net_device *ndev) dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) & IFI_CANFD_RXFIFO_DLC_DLC_MASK; if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) - cf->len = can_dlc2len(dlc); + cf->len = can_fd_dlc2len(dlc); else - cf->len = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID); id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET); @@ -431,7 +431,7 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -523,7 +523,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -900,7 +900,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, txid = cf->can_id & CAN_SFF_MASK; } - txdlc = can_len2dlc(cf->len); + txdlc = can_fd_len2dlc(cf->len); if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) { txdlc |= IFI_CANFD_TXFIFO_DLC_EDL; if (cf->flags & CANFD_BRS) diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index f929db893957..2a6c918186c0 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -916,10 +916,10 @@ static void ican3_to_can_frame(struct ican3_dev *mod, cf->can_id |= desc->data[0] << 3; cf->can_id |= (desc->data[1] & 0xe0) >> 5; - cf->can_dlc = get_can_dlc(desc->data[1] & ICAN3_CAN_DLC_MASK); - memcpy(cf->data, &desc->data[2], cf->can_dlc); + cf->len = can_cc_dlc2len(desc->data[1] & ICAN3_CAN_DLC_MASK); + memcpy(cf->data, &desc->data[2], cf->len); } else { - cf->can_dlc = get_can_dlc(desc->data[0] & ICAN3_CAN_DLC_MASK); + cf->len = can_cc_dlc2len(desc->data[0] & ICAN3_CAN_DLC_MASK); if (desc->data[0] & ICAN3_EFF_RTR) cf->can_id |= CAN_RTR_FLAG; @@ -934,7 +934,7 @@ static void ican3_to_can_frame(struct ican3_dev *mod, cf->can_id |= desc->data[3] >> 5; /* 2-0 */ } - memcpy(cf->data, &desc->data[6], cf->can_dlc); + memcpy(cf->data, &desc->data[6], cf->len); } } @@ -947,7 +947,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod, /* we always use the extended format, with the ECHO flag set */ desc->command = ICAN3_CAN_TYPE_EFF; - desc->data[0] |= cf->can_dlc; + desc->data[0] |= cf->len; desc->data[1] |= ICAN3_ECHO; /* support single transmission (no retries) mode */ @@ -970,7 +970,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod, } /* copy the data bits into the descriptor */ - memcpy(&desc->data[6], cf->data, cf->can_dlc); + memcpy(&desc->data[6], cf->data, cf->len); } /* @@ -1294,7 +1294,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod) } cf = (struct can_frame *)skb->data; - dlc = cf->can_dlc; + dlc = cf->len; /* check flag whether this packet has to be looped back */ if (skb->pkt_type != PACKET_LOOPBACK) { @@ -1332,10 +1332,10 @@ static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff 
*skb) if (cf->can_id != echo_cf->can_id) return false; - if (cf->can_dlc != echo_cf->can_dlc) + if (cf->len != echo_cf->len) return false; - return memcmp(cf->data, echo_cf->data, cf->can_dlc) == 0; + return memcmp(cf->data, echo_cf->data, cf->len) == 0; } /* @@ -1421,7 +1421,7 @@ static int ican3_recv_skb(struct ican3_dev *mod) /* update statistics, receive the skb */ stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); err_noalloc: diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 43151dd6cb1c..969cedb9b0b6 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -742,7 +742,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; p->header[0] |= cf->can_id & CAN_EFF_MASK; - p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; + p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT; p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; if (can_is_canfd_skb(skb)) { @@ -1176,7 +1176,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) cf->can_id |= CAN_EFF_FLAG; - cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); + cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) cf->can_id |= CAN_RTR_FLAG; @@ -1301,7 +1301,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, cf->data[7] = bec.rxerr; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -1500,7 +1500,7 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, if (skb) { cf->can_id |= CAN_ERR_BUSERROR; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; stats->rx_packets++; netif_rx(skb); } else { @@ -1602,7 +1602,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { u8 data_len; - data_len = can_dlc2len(p->header[1] >> + data_len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT); pos += DIV_ROUND_UP(data_len, 4); } diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index 5f9f8192dd0b..45ad1b3f0cd0 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,21 +1,28 @@ # SPDX-License-Identifier: GPL-2.0-only -config CAN_M_CAN +menuconfig CAN_M_CAN tristate "Bosch M_CAN support" help Say Y here if you want support for Bosch M_CAN controller framework. This is common support for devices that embed the Bosch M_CAN IP. +if CAN_M_CAN + +config CAN_M_CAN_PCI + tristate "Generic PCI Bus based M_CAN driver" + depends on PCI + help + Say Y here if you want to support Bosch M_CAN controller connected + to the pci bus. + config CAN_M_CAN_PLATFORM tristate "Bosch M_CAN support for io-mapped devices" depends on HAS_IOMEM - depends on CAN_M_CAN help Say Y here if you want support for IO Mapped Bosch M_CAN controller. This support is for devices that have the Bosch M_CAN controller IP embedded into the device and the IP is IO Mapped to the processor. config CAN_M_CAN_TCAN4X5X - depends on CAN_M_CAN depends on SPI select REGMAP_SPI tristate "TCAN4X5X M_CAN device" @@ -23,3 +30,5 @@ config CAN_M_CAN_TCAN4X5X Say Y here if you want support for Texas Instruments TCAN4x5x M_CAN controller. 
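The TCAN4X5X entry just above describes an SPI-attached M_CAN peripheral, and the tcan4x5x.c hunks near the end of this diff reach its M_CAN register window through regmap rather than memory-mapped I/O (via the driver's TCAN4X5X_MCAN_OFFSET constant). A sketch under that assumption; the 0x1000 window offset and the function name here are illustrative:

#include <linux/regmap.h>

#define EXAMPLE_MCAN_WINDOW	0x1000	/* assumed offset of the M_CAN registers */

static u32 example_spi_read_mcan_reg(struct regmap *map, unsigned int reg)
{
	unsigned int val = 0;

	/* regmap hides the SPI transfer behind a register read */
	regmap_read(map, EXAMPLE_MCAN_WINDOW + reg, &val);

	return val;
}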
This device is a peripheral device that uses the SPI bus for communication. + +endif diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile index 52a4a6fbe527..ef7963ff2006 100644 --- a/drivers/net/can/m_can/Makefile +++ b/drivers/net/can/m_can/Makefile @@ -4,5 +4,6 @@ # obj-$(CONFIG_CAN_M_CAN) += m_can.o +obj-$(CONFIG_CAN_M_CAN_PCI) += m_can_pci.o obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 61a93b192037..2c9f12401276 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -5,8 +5,7 @@ // Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/ /* Bosch M_CAN user manual can be obtained from: - * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ - * mcan_users_manual_v302.pdf + * https://github.com/linux-can/can-doc/tree/master/m_can */ #include <linux/interrupt.h> @@ -40,7 +39,7 @@ enum m_can_reg { M_CAN_TOCV = 0x2c, M_CAN_ECR = 0x40, M_CAN_PSR = 0x44, -/* TDCR Register only available for version >=3.1.x */ + /* TDCR Register only available for version >=3.1.x */ M_CAN_TDCR = 0x48, M_CAN_IR = 0x50, M_CAN_IE = 0x54, @@ -336,7 +335,7 @@ static u32 m_can_fifo_read(struct m_can_classdev *cdev, u32 fgi, unsigned int offset) { u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE + - offset; + offset; return cdev->ops->read_fifo(cdev, addr_offset); } @@ -345,7 +344,7 @@ static void m_can_fifo_write(struct m_can_classdev *cdev, u32 fpi, unsigned int offset, u32 val) { u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE + - offset; + offset; cdev->ops->write_fifo(cdev, addr_offset, val); } @@ -359,17 +358,17 @@ static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev, static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset) { u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE + - offset; + offset; return cdev->ops->read_fifo(cdev, addr_offset); } static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev) { - return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF); + return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF); } -void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) +static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) { u32 cccr = m_can_read(cdev, M_CAN_CCCR); u32 timeout = 10; @@ -380,10 +379,6 @@ void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) cccr &= ~CCCR_CSR; if (enable) { - /* Clear the Clock stop request if it was set */ - if (cccr & CCCR_CSR) - cccr &= ~CCCR_CSR; - /* enable m_can configuration */ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); udelay(5); @@ -457,9 +452,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs) } if (dlc & RX_BUF_FDF) - cf->len = can_dlc2len((dlc >> 16) & 0x0F); + cf->len = can_fd_dlc2len((dlc >> 16) & 0x0F); else - cf->len = get_can_dlc((dlc >> 16) & 0x0F); + cf->len = can_cc_dlc2len((dlc >> 16) & 0x0F); id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) @@ -596,7 +591,7 @@ static int m_can_handle_lec_err(struct net_device *dev, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -617,18 +612,10 @@ static int __m_can_get_berr_counter(const struct net_device *dev, static int m_can_clk_start(struct m_can_classdev *cdev) { - int err; - if (cdev->pm_clock_support == 0) return 0; - err 
= pm_runtime_get_sync(cdev->dev); - if (err < 0) { - pm_runtime_put_noidle(cdev->dev); - return err; - } - - return 0; + return pm_runtime_resume_and_get(cdev->dev); } static void m_can_clk_stop(struct m_can_classdev *cdev) @@ -723,7 +710,7 @@ static int m_can_handle_state_change(struct net_device *dev, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); return 1; @@ -926,14 +913,13 @@ static void m_can_echo_tx_event(struct net_device *dev) m_can_txefs = m_can_read(cdev, M_CAN_TXEFS); /* Get Tx Event fifo element count */ - txe_count = (m_can_txefs & TXEFS_EFFL_MASK) - >> TXEFS_EFFL_SHIFT; + txe_count = (m_can_txefs & TXEFS_EFFL_MASK) >> TXEFS_EFFL_SHIFT; /* Get and process all sent elements */ for (i = 0; i < txe_count; i++) { /* retrieve get index */ - fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) - >> TXEFS_EFGI_SHIFT; + fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >> + TXEFS_EFGI_SHIFT; /* get message marker */ msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) & @@ -1092,7 +1078,7 @@ static int m_can_set_bittiming(struct net_device *dev) * Transmitter Delay Compensation Section */ tdco = (cdev->can.clock.freq / 1000) * - ssp / dbt->bitrate; + ssp / dbt->bitrate; /* Max valid TDCO value is 127 */ if (tdco > 127) { @@ -1107,9 +1093,9 @@ static int m_can_set_bittiming(struct net_device *dev) } reg_btp |= (brp << DBTP_DBRP_SHIFT) | - (sjw << DBTP_DSJW_SHIFT) | - (tseg1 << DBTP_DTSEG1_SHIFT) | - (tseg2 << DBTP_DTSEG2_SHIFT); + (sjw << DBTP_DSJW_SHIFT) | + (tseg1 << DBTP_DTSEG1_SHIFT) | + (tseg2 << DBTP_DTSEG2_SHIFT); m_can_write(cdev, M_CAN_DBTP, reg_btp); } @@ -1142,7 +1128,7 @@ static void m_can_chip_config(struct net_device *dev) if (cdev->version == 30) { /* only support one Tx Buffer currently */ m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) | - cdev->mcfg[MRAM_TXB].off); + cdev->mcfg[MRAM_TXB].off); } else { /* TX FIFO is used for newer IP Core versions */ m_can_write(cdev, M_CAN_TXBC, @@ -1156,7 +1142,7 @@ static void m_can_chip_config(struct net_device *dev) /* TX Event FIFO */ if (cdev->version == 30) { m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) | - cdev->mcfg[MRAM_TXE].off); + cdev->mcfg[MRAM_TXE].off); } else { /* Full TX Event FIFO is used */ m_can_write(cdev, M_CAN_TXEFC, @@ -1168,27 +1154,27 @@ static void m_can_chip_config(struct net_device *dev) /* rx fifo configuration, blocking mode, fifo size 1 */ m_can_write(cdev, M_CAN_RXF0C, (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) | - cdev->mcfg[MRAM_RXF0].off); + cdev->mcfg[MRAM_RXF0].off); m_can_write(cdev, M_CAN_RXF1C, (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) | - cdev->mcfg[MRAM_RXF1].off); + cdev->mcfg[MRAM_RXF1].off); cccr = m_can_read(cdev, M_CAN_CCCR); test = m_can_read(cdev, M_CAN_TEST); test &= ~TEST_LBCK; if (cdev->version == 30) { - /* Version 3.0.x */ + /* Version 3.0.x */ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR | - (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | - (CCCR_CME_MASK << CCCR_CME_SHIFT)); + (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; } else { - /* Version 3.1.x or 3.2.x */ + /* Version 3.1.x or 3.2.x */ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | CCCR_NISO | CCCR_DAR); @@ -1333,80 +1319,79 @@ static bool m_can_niso_supported(struct m_can_classdev *cdev) return !niso_timeout; } -static int m_can_dev_setup(struct m_can_classdev *m_can_dev) +static int m_can_dev_setup(struct m_can_classdev 
*cdev) { - struct net_device *dev = m_can_dev->net; + struct net_device *dev = cdev->net; int m_can_version; - m_can_version = m_can_check_core_release(m_can_dev); + m_can_version = m_can_check_core_release(cdev); /* return if unsupported version */ if (!m_can_version) { - dev_err(m_can_dev->dev, "Unsupported version number: %2d", + dev_err(cdev->dev, "Unsupported version number: %2d", m_can_version); return -EINVAL; } - if (!m_can_dev->is_peripheral) - netif_napi_add(dev, &m_can_dev->napi, + if (!cdev->is_peripheral) + netif_napi_add(dev, &cdev->napi, m_can_poll, M_CAN_NAPI_WEIGHT); /* Shared properties of all M_CAN versions */ - m_can_dev->version = m_can_version; - m_can_dev->can.do_set_mode = m_can_set_mode; - m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter; + cdev->version = m_can_version; + cdev->can.do_set_mode = m_can_set_mode; + cdev->can.do_get_berr_counter = m_can_get_berr_counter; /* Set M_CAN supported operations */ - m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | - CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING | - CAN_CTRLMODE_FD | - CAN_CTRLMODE_ONE_SHOT; + cdev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_ONE_SHOT; /* Set properties depending on M_CAN version */ - switch (m_can_dev->version) { + switch (cdev->version) { case 30: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? - m_can_dev->bit_timing : &m_can_bittiming_const_30X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_30X; - m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? - m_can_dev->data_timing : - &m_can_data_bittiming_const_30X; + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_30X; break; case 31: /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO); - m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? - m_can_dev->bit_timing : &m_can_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; - m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? - m_can_dev->data_timing : - &m_can_data_bittiming_const_31X; + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; break; case 32: case 33: /* Support both MCAN version v3.2.x and v3.3.0 */ - m_can_dev->can.bittiming_const = m_can_dev->bit_timing ? - m_can_dev->bit_timing : &m_can_bittiming_const_31X; + cdev->can.bittiming_const = cdev->bit_timing ? + cdev->bit_timing : &m_can_bittiming_const_31X; - m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ? - m_can_dev->data_timing : - &m_can_data_bittiming_const_31X; + cdev->can.data_bittiming_const = cdev->data_timing ? + cdev->data_timing : + &m_can_data_bittiming_const_31X; - m_can_dev->can.ctrlmode_supported |= - (m_can_niso_supported(m_can_dev) - ? CAN_CTRLMODE_FD_NON_ISO - : 0); + cdev->can.ctrlmode_supported |= + (m_can_niso_supported(cdev) ? 
+ CAN_CTRLMODE_FD_NON_ISO : 0); break; default: - dev_err(m_can_dev->dev, "Unsupported version number: %2d", - m_can_dev->version); + dev_err(cdev->dev, "Unsupported version number: %2d", + cdev->version); return -EINVAL; } - if (m_can_dev->ops->init) - m_can_dev->ops->init(m_can_dev); + if (cdev->ops->init) + cdev->ops->init(cdev); return 0; } @@ -1491,7 +1476,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* message ram configuration */ m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id); m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC, - can_len2dlc(cf->len) << 16); + can_fd_len2dlc(cf->len) << 16); for (i = 0; i < cf->len; i += 4) m_can_fifo_write(cdev, 0, @@ -1539,7 +1524,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) /* get put index for frame */ putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK) - >> TXFQS_TFQPI_SHIFT); + >> TXFQS_TFQPI_SHIFT); /* Write ID Field to FIFO Element */ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id); @@ -1559,7 +1544,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC, ((putidx << TX_BUF_MM_SHIFT) & TX_BUF_MM_MASK) | - (can_len2dlc(cf->len) << 16) | + (can_fd_len2dlc(cf->len) << 16) | fdflags | TX_BUF_EFC); for (i = 0; i < cf->len; i += 4) @@ -1586,7 +1571,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev) static void m_can_tx_work_queue(struct work_struct *ws) { struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev, - tx_work); + tx_work); m_can_tx_handler(cdev); cdev->tx_skb = NULL; @@ -1710,26 +1695,26 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev, cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0]; cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1]; cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off + - cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2]; cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off + - cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; + cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] & - (RXFC_FS_MASK >> RXFC_FS_SHIFT); + (RXFC_FS_MASK >> RXFC_FS_SHIFT); cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off + - cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] & - (RXFC_FS_MASK >> RXFC_FS_SHIFT); + (RXFC_FS_MASK >> RXFC_FS_SHIFT); cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off + - cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; cdev->mcfg[MRAM_RXB].num = mram_config_vals[5]; cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off + - cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; + cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; cdev->mcfg[MRAM_TXE].num = mram_config_vals[6]; cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off + - cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; + cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] & - (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); + (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT); dev_dbg(cdev->dev, "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", @@ -1758,15 +1743,15 @@ void m_can_init_ram(struct m_can_classdev *cdev) } EXPORT_SYMBOL_GPL(m_can_init_ram); -int m_can_class_get_clocks(struct m_can_classdev *m_can_dev) +int m_can_class_get_clocks(struct m_can_classdev *cdev) { int ret = 0; - 
m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk"); - m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk"); + cdev->hclk = devm_clk_get(cdev->dev, "hclk"); + cdev->cclk = devm_clk_get(cdev->dev, "cclk"); - if (IS_ERR(m_can_dev->cclk)) { - dev_err(m_can_dev->dev, "no clock found\n"); + if (IS_ERR(cdev->cclk)) { + dev_err(cdev->dev, "no clock found\n"); ret = -ENODEV; } @@ -1774,7 +1759,8 @@ int m_can_class_get_clocks(struct m_can_classdev *m_can_dev) } EXPORT_SYMBOL_GPL(m_can_class_get_clocks); -struct m_can_classdev *m_can_class_allocate_dev(struct device *dev) +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, + int sizeof_priv) { struct m_can_classdev *class_dev = NULL; u32 mram_config_vals[MRAM_CFG_LEN]; @@ -1797,7 +1783,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev) tx_fifo_size = mram_config_vals[7]; /* allocate the m_can device */ - net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size); + net_dev = alloc_candev(sizeof_priv, tx_fifo_size); if (!net_dev) { dev_err(dev, "Failed to allocate CAN device"); goto out; @@ -1825,54 +1811,56 @@ void m_can_class_free_dev(struct net_device *net) } EXPORT_SYMBOL_GPL(m_can_class_free_dev); -int m_can_class_register(struct m_can_classdev *m_can_dev) +int m_can_class_register(struct m_can_classdev *cdev) { int ret; - if (m_can_dev->pm_clock_support) { - pm_runtime_enable(m_can_dev->dev); - ret = m_can_clk_start(m_can_dev); + if (cdev->pm_clock_support) { + ret = m_can_clk_start(cdev); if (ret) - goto pm_runtime_fail; + return ret; } - ret = m_can_dev_setup(m_can_dev); + ret = m_can_dev_setup(cdev); if (ret) goto clk_disable; - ret = register_m_can_dev(m_can_dev->net); + ret = register_m_can_dev(cdev->net); if (ret) { - dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n", - m_can_dev->net->name, ret); + dev_err(cdev->dev, "registering %s failed (err=%d)\n", + cdev->net->name, ret); goto clk_disable; } - devm_can_led_init(m_can_dev->net); + devm_can_led_init(cdev->net); - of_can_transceiver(m_can_dev->net); + of_can_transceiver(cdev->net); - dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n", - KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version); + dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n", + KBUILD_MODNAME, cdev->net->irq, cdev->version); /* Probe finished * Stop clocks. 
They will be reactivated once the M_CAN device is opened */ clk_disable: - m_can_clk_stop(m_can_dev); -pm_runtime_fail: - if (ret) { - if (m_can_dev->pm_clock_support) - pm_runtime_disable(m_can_dev->dev); - } + m_can_clk_stop(cdev); return ret; } EXPORT_SYMBOL_GPL(m_can_class_register); +void m_can_class_unregister(struct m_can_classdev *cdev) +{ + unregister_candev(cdev->net); + + m_can_clk_stop(cdev); +} +EXPORT_SYMBOL_GPL(m_can_class_unregister); + int m_can_class_suspend(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_classdev *cdev = netdev_priv(ndev); + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct net_device *ndev = cdev->net; if (netif_running(ndev)) { netif_stop_queue(ndev); @@ -1891,8 +1879,8 @@ EXPORT_SYMBOL_GPL(m_can_class_suspend); int m_can_class_resume(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_classdev *cdev = netdev_priv(ndev); + struct m_can_classdev *cdev = dev_get_drvdata(dev); + struct net_device *ndev = cdev->net; pinctrl_pm_select_default_state(dev); @@ -1915,14 +1903,6 @@ int m_can_class_resume(struct device *dev) } EXPORT_SYMBOL_GPL(m_can_class_resume); -void m_can_class_unregister(struct m_can_classdev *m_can_dev) -{ - unregister_candev(m_can_dev->net); - - m_can_clk_stop(m_can_dev); -} -EXPORT_SYMBOL_GPL(m_can_class_unregister); - MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>"); MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index b2699a7c9997..3fda84cef351 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -86,10 +86,7 @@ struct m_can_classdev { struct m_can_ops *ops; - void *device_data; - int version; - int freq; u32 irqstatus; int pm_clock_support; @@ -98,13 +95,12 @@ struct m_can_classdev { struct mram_cfg mcfg[MRAM_CFG_NUM]; }; -struct m_can_classdev *m_can_class_allocate_dev(struct device *dev); +struct m_can_classdev *m_can_class_allocate_dev(struct device *dev, int sizeof_priv); void m_can_class_free_dev(struct net_device *net); int m_can_class_register(struct m_can_classdev *cdev); void m_can_class_unregister(struct m_can_classdev *cdev); int m_can_class_get_clocks(struct m_can_classdev *cdev); void m_can_init_ram(struct m_can_classdev *priv); -void m_can_config_endisable(struct m_can_classdev *priv, bool enable); int m_can_class_suspend(struct device *dev); int m_can_class_resume(struct device *dev); diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c new file mode 100644 index 000000000000..128808605c3f --- /dev/null +++ b/drivers/net/can/m_can/m_can_pci.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Specific M_CAN Glue + * + * Copyright (C) 2018-2020 Intel Corporation + * Author: Felipe Balbi (Intel) + * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com> + * Author: Raymond Tan <raymond.tan@intel.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> + +#include "m_can.h" + +#define M_CAN_PCI_MMIO_BAR 0 + +#define M_CAN_CLOCK_FREQ_EHL 100000000 +#define CTL_CSR_INT_CTL_OFFSET 0x508 + +struct m_can_pci_priv { + struct m_can_classdev cdev; + + void __iomem *base; +}; + +static inline struct m_can_pci_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_pci_priv, cdev); +} + +static u32 iomap_read_reg(struct m_can_classdev *cdev, int 
reg) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + reg); +} + +static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + return readl(priv->base + offset); +} + +static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + reg); + + return 0; +} + +static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) +{ + struct m_can_pci_priv *priv = cdev_to_priv(cdev); + + writel(val, priv->base + offset); + + return 0; +} + +static struct m_can_ops m_can_pci_ops = { + .read_reg = iomap_read_reg, + .write_reg = iomap_write_reg, + .write_fifo = iomap_write_fifo, + .read_fifo = iomap_read_fifo, +}; + +static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + struct device *dev = &pci->dev; + struct m_can_classdev *mcan_class; + struct m_can_pci_priv *priv; + void __iomem *base; + int ret; + + ret = pcim_enable_device(pci); + if (ret) + return ret; + + pci_set_master(pci); + + ret = pcim_iomap_regions(pci, BIT(M_CAN_PCI_MMIO_BAR), pci_name(pci)); + if (ret) + return ret; + + base = pcim_iomap_table(pci)[M_CAN_PCI_MMIO_BAR]; + + if (!base) { + dev_err(dev, "failed to map BARs\n"); + return -ENOMEM; + } + + mcan_class = m_can_class_allocate_dev(&pci->dev, + sizeof(struct m_can_pci_priv)); + if (!mcan_class) + return -ENOMEM; + + priv = cdev_to_priv(mcan_class); + + priv->base = base; + + ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + mcan_class->dev = &pci->dev; + mcan_class->net->irq = pci_irq_vector(pci, 0); + mcan_class->pm_clock_support = 1; + mcan_class->can.clock.freq = id->driver_data; + mcan_class->ops = &m_can_pci_ops; + + pci_set_drvdata(pci, mcan_class); + + ret = m_can_class_register(mcan_class); + if (ret) + goto err; + + /* Enable interrupt control at CAN wrapper IP */ + writel(0x1, base + CTL_CSR_INT_CTL_OFFSET); + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_noidle(dev); + pm_runtime_allow(dev); + + return 0; + +err: + pci_free_irq_vectors(pci); + return ret; +} + +static void m_can_pci_remove(struct pci_dev *pci) +{ + struct m_can_classdev *mcan_class = pci_get_drvdata(pci); + struct m_can_pci_priv *priv = cdev_to_priv(mcan_class); + + pm_runtime_forbid(&pci->dev); + pm_runtime_get_noresume(&pci->dev); + + /* Disable interrupt control at CAN wrapper IP */ + writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET); + + m_can_class_unregister(mcan_class); + pci_free_irq_vectors(pci); +} + +static __maybe_unused int m_can_pci_suspend(struct device *dev) +{ + return m_can_class_suspend(dev); +} + +static __maybe_unused int m_can_pci_resume(struct device *dev) +{ + return m_can_class_resume(dev); +} + +static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops, + m_can_pci_suspend, m_can_pci_resume); + +static const struct pci_device_id m_can_pci_id_table[] = { + { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, }, + { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, }, + { } /* Terminating Entry */ +}; +MODULE_DEVICE_TABLE(pci, m_can_pci_id_table); + +static struct pci_driver m_can_pci_driver = { + .name = "m_can_pci", + .probe = m_can_pci_probe, + .remove = m_can_pci_remove, + .id_table = m_can_pci_id_table, + .driver = { + .pm = &m_can_pci_pm_ops, + }, +}; + +module_pci_driver(m_can_pci_driver); + +MODULE_AUTHOR("Felipe Balbi (Intel)"); +MODULE_AUTHOR("Jarkko Nikula 
<jarkko.nikula@linux.intel.com>"); +MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller on PCI bus"); diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index 161cb9be018c..599de0e08cd7 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -10,27 +10,34 @@ #include "m_can.h" struct m_can_plat_priv { + struct m_can_classdev cdev; + void __iomem *base; void __iomem *mram_base; }; +static inline struct m_can_plat_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct m_can_plat_priv, cdev); +} + static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg) { - struct m_can_plat_priv *priv = cdev->device_data; + struct m_can_plat_priv *priv = cdev_to_priv(cdev); return readl(priv->base + reg); } static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset) { - struct m_can_plat_priv *priv = cdev->device_data; + struct m_can_plat_priv *priv = cdev_to_priv(cdev); return readl(priv->mram_base + offset); } static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) { - struct m_can_plat_priv *priv = cdev->device_data; + struct m_can_plat_priv *priv = cdev_to_priv(cdev); writel(val, priv->base + reg); @@ -39,7 +46,7 @@ static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val) static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val) { - struct m_can_plat_priv *priv = cdev->device_data; + struct m_can_plat_priv *priv = cdev_to_priv(cdev); writel(val, priv->mram_base + offset); @@ -62,17 +69,12 @@ static int m_can_plat_probe(struct platform_device *pdev) void __iomem *mram_addr; int irq, ret = 0; - mcan_class = m_can_class_allocate_dev(&pdev->dev); + mcan_class = m_can_class_allocate_dev(&pdev->dev, + sizeof(struct m_can_plat_priv)); if (!mcan_class) return -ENOMEM; - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto probe_fail; - } - - mcan_class->device_data = priv; + priv = cdev_to_priv(mcan_class); ret = m_can_class_get_clocks(mcan_class); if (ret) @@ -111,12 +113,19 @@ static int m_can_plat_probe(struct platform_device *pdev) mcan_class->is_peripheral = false; - platform_set_drvdata(pdev, mcan_class->net); + platform_set_drvdata(pdev, mcan_class); m_can_init_ram(mcan_class); - return m_can_class_register(mcan_class); + pm_runtime_enable(mcan_class->dev); + ret = m_can_class_register(mcan_class); + if (ret) + goto out_runtime_disable; + + return ret; +out_runtime_disable: + pm_runtime_disable(mcan_class->dev); probe_fail: m_can_class_free_dev(mcan_class->net); return ret; @@ -134,22 +143,20 @@ static __maybe_unused int m_can_resume(struct device *dev) static int m_can_plat_remove(struct platform_device *pdev) { - struct net_device *dev = platform_get_drvdata(pdev); - struct m_can_classdev *mcan_class = netdev_priv(dev); + struct m_can_plat_priv *priv = platform_get_drvdata(pdev); + struct m_can_classdev *mcan_class = &priv->cdev; m_can_class_unregister(mcan_class); m_can_class_free_dev(mcan_class->net); - platform_set_drvdata(pdev, NULL); - return 0; } static int __maybe_unused m_can_runtime_suspend(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_classdev *mcan_class = netdev_priv(ndev); + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; clk_disable_unprepare(mcan_class->cclk); 
clk_disable_unprepare(mcan_class->hclk); @@ -159,8 +166,8 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev) static int __maybe_unused m_can_runtime_resume(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct m_can_classdev *mcan_class = netdev_priv(ndev); + struct m_can_plat_priv *priv = dev_get_drvdata(dev); + struct m_can_classdev *mcan_class = &priv->cdev; int err; err = clk_prepare_enable(mcan_class->hclk); diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c index 7347ab39c5b6..24c737c4fc44 100644 --- a/drivers/net/can/m_can/tcan4x5x.c +++ b/drivers/net/can/m_can/tcan4x5x.c @@ -114,21 +114,23 @@ #define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29)) struct tcan4x5x_priv { + struct m_can_classdev cdev; + struct regmap *regmap; struct spi_device *spi; - struct m_can_classdev *mcan_dev; - struct gpio_desc *reset_gpio; struct gpio_desc *device_wake_gpio; struct gpio_desc *device_state_gpio; struct regulator *power; - - /* Register based ip */ - int mram_start; - int reg_offset; }; +static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev) +{ + return container_of(cdev, struct tcan4x5x_priv, cdev); + +} + static struct can_bittiming_const tcan4x5x_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 2, @@ -257,37 +259,37 @@ static struct regmap_bus tcan4x5x_bus = { static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg) { - struct tcan4x5x_priv *priv = cdev->device_data; + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); u32 val; - regmap_read(priv->regmap, priv->reg_offset + reg, &val); + regmap_read(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, &val); return val; } static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset) { - struct tcan4x5x_priv *priv = cdev->device_data; + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); u32 val; - regmap_read(priv->regmap, priv->mram_start + addr_offset, &val); + regmap_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, &val); return val; } static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val) { - struct tcan4x5x_priv *priv = cdev->device_data; + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); - return regmap_write(priv->regmap, priv->reg_offset + reg, val); + return regmap_write(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, val); } static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, int addr_offset, int val) { - struct tcan4x5x_priv *priv = cdev->device_data; + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); - return regmap_write(priv->regmap, priv->mram_start + addr_offset, val); + return regmap_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val); } static int tcan4x5x_power_enable(struct regulator *reg, int enable) @@ -304,7 +306,7 @@ static int tcan4x5x_power_enable(struct regulator *reg, int enable) static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev, int reg, int val) { - struct tcan4x5x_priv *priv = cdev->device_data; + struct tcan4x5x_priv *priv = cdev_to_priv(cdev); return regmap_write(priv->regmap, reg, val); } @@ -328,17 +330,13 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev) if (ret) return ret; - ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, - TCAN4X5X_CLEAR_ALL_INT); - if (ret) - return ret; - - return ret; + return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS, + TCAN4X5X_CLEAR_ALL_INT); } static int tcan4x5x_init(struct m_can_classdev *cdev) { - struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + struct tcan4x5x_priv *tcan4x5x = 
cdev_to_priv(cdev); int ret; tcan4x5x_check_wake(tcan4x5x); @@ -365,7 +363,7 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) { - struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, TCAN4X5X_DISABLE_WAKE_MSK, 0x00); @@ -373,15 +371,15 @@ static int tcan4x5x_disable_wake(struct m_can_classdev *cdev) static int tcan4x5x_disable_state(struct m_can_classdev *cdev) { - struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, TCAN4X5X_DISABLE_INH_MSK, 0x01); } -static int tcan4x5x_parse_config(struct m_can_classdev *cdev) +static int tcan4x5x_get_gpios(struct m_can_classdev *cdev) { - struct tcan4x5x_priv *tcan4x5x = cdev->device_data; + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); int ret; tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", @@ -435,15 +433,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi) struct m_can_classdev *mcan_class; int freq, ret; - mcan_class = m_can_class_allocate_dev(&spi->dev); + mcan_class = m_can_class_allocate_dev(&spi->dev, + sizeof(struct tcan4x5x_priv)); if (!mcan_class) return -ENOMEM; - priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto out_m_can_class_free_dev; - } + priv = cdev_to_priv(mcan_class); priv->power = devm_regulator_get_optional(&spi->dev, "vsup"); if (PTR_ERR(priv->power) == -EPROBE_DEFER) { @@ -453,8 +448,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) priv->power = NULL; } - mcan_class->device_data = priv; - m_can_class_get_clocks(mcan_class); if (IS_ERR(mcan_class->cclk)) { dev_err(&spi->dev, "no CAN clock source defined\n"); @@ -469,10 +462,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi) goto out_m_can_class_free_dev; } - priv->reg_offset = TCAN4X5X_MCAN_OFFSET; - priv->mram_start = TCAN4X5X_MRAM_START; priv->spi = spi; - priv->mcan_dev = mcan_class; mcan_class->pm_clock_support = 0; mcan_class->can.clock.freq = freq; @@ -502,7 +492,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi) if (ret) goto out_m_can_class_free_dev; - ret = tcan4x5x_parse_config(mcan_class); + ret = tcan4x5x_get_gpios(mcan_class); if (ret) goto out_power; @@ -521,8 +511,6 @@ out_power: tcan4x5x_power_enable(priv->power, 0); out_m_can_class_free_dev: m_can_class_free_dev(mcan_class->net); - dev_err(&spi->dev, "Probe failed, err=%d\n", ret); - return ret; } @@ -530,11 +518,11 @@ static int tcan4x5x_can_remove(struct spi_device *spi) { struct tcan4x5x_priv *priv = spi_get_drvdata(spi); - m_can_class_unregister(priv->mcan_dev); + m_can_class_unregister(&priv->cdev); tcan4x5x_power_enable(priv->power, 0); - m_can_class_free_dev(priv->mcan_dev->net); + m_can_class_free_dev(priv->cdev.net); return 0; } diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 640ba1b356ec..5ed00a1558e1 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -250,16 +250,16 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) void __iomem *data = ®s->tx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < frame->can_dlc / 2; i++) { + for (i = 0; i < frame->len / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* write remaining byte if necessary */ - if (frame->can_dlc & 1) - 
out_8(data, frame->data[frame->can_dlc - 1]); + if (frame->len & 1) + out_8(data, frame->data[frame->len - 1]); } - out_8(®s->tx.dlr, frame->can_dlc); + out_8(®s->tx.dlr, frame->len); out_8(®s->tx.tbpr, priv->cur_pri); /* Start transmission. */ @@ -312,19 +312,19 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) if (can_id & 1) frame->can_id |= CAN_RTR_FLAG; - frame->can_dlc = get_can_dlc(in_8(®s->rx.dlr) & 0xf); + frame->len = can_cc_dlc2len(in_8(®s->rx.dlr) & 0xf); if (!(frame->can_id & CAN_RTR_FLAG)) { void __iomem *data = ®s->rx.dsr1_0; u16 *payload = (u16 *)frame->data; - for (i = 0; i < frame->can_dlc / 2; i++) { + for (i = 0; i < frame->len / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* read remaining byte if necessary */ - if (frame->can_dlc & 1) - frame->data[frame->can_dlc - 1] = in_8(data); + if (frame->len & 1) + frame->data[frame->len - 1] = in_8(data); } out_8(®s->canrflg, MSCAN_RXF); @@ -372,7 +372,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; - frame->can_dlc = CAN_ERR_DLC; + frame->len = CAN_ERR_DLC; out_8(®s->canrflg, MSCAN_ERR_IF); } @@ -407,7 +407,7 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota) mscan_get_err_frame(dev, frame, canrflg); stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; + stats->rx_bytes += frame->len; work_done++; netif_receive_skb(skb); } diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 5c180d2f3c3c..4f9e7ec192aa 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -563,7 +563,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) netif_receive_skb(skb); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; } static irqreturn_t pch_can_interrupt(int irq, void *dev_id) @@ -683,10 +683,10 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) if (id2 & PCH_ID2_DIR) cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = get_can_dlc((ioread32(&priv->regs-> + cf->len = can_cc_dlc2len((ioread32(&priv->regs-> ifregs[0].mcont)) & 0xF); - for (i = 0; i < cf->can_dlc; i += 2) { + for (i = 0; i < cf->len; i += 2) { data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]); cf->data[i] = data_reg; cf->data[i + 1] = data_reg >> 8; @@ -696,7 +696,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota) rcv_pkts++; stats->rx_packets++; quota--; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; pch_fifo_thresh(priv, obj_num); obj_num++; @@ -715,7 +715,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat) iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND, &priv->regs->ifregs[1].cmask); pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat); - dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) & + dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) & PCH_IF_MCONT_DLC); stats->tx_bytes += dlc; stats->tx_packets++; @@ -919,7 +919,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) iowrite32(id2, &priv->regs->ifregs[1].id2); /* Copy data to register */ - for (i = 0; i < cf->can_dlc; i += 2) { + for (i = 0; i < cf->len; i += 2) { iowrite16(cf->data[i] | (cf->data[i + 1] << 8), &priv->regs->ifregs[1].data[i / 2]); } @@ -927,7 +927,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1); /* Set the size 
of the data. Update if2_mcont */ - iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT | + iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT | PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont); pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no); diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 40c33b8a5fda..c5334b0c3038 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -257,9 +257,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, u8 cf_len; if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) - cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg))); + cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg)); else - cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); + cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg)); /* if this frame is an echo, */ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { @@ -410,7 +410,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv, } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; @@ -438,7 +438,7 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; stats->rx_packets++; netif_rx(skb); @@ -652,7 +652,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, unsigned long flags; bool should_stop_tx_queue; int room_left; - u8 can_dlc; + u8 len; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; @@ -682,7 +682,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ - can_dlc = can_len2dlc(cf->len); + len = can_fd_len2dlc(cf->len); msg_flags |= PUCAN_MSG_EXT_DATA_LEN; @@ -693,7 +693,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frame format */ - can_dlc = cf->len; + len = cf->len; if (cf->can_id & CAN_RTR_FLAG) msg_flags |= PUCAN_MSG_RTR; @@ -707,7 +707,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, msg_flags |= PUCAN_MSG_SELF_RECEIVE; msg->flags = cpu_to_le16(msg_flags); - msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc); + msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len); memcpy(msg->d, cf->data, cf->len); /* struct msg client field is used as an index in the echo skbs ring */ diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 48575900adb7..c803327f8f79 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -364,7 +364,7 @@ static void rcar_can_error(struct net_device *ndev) if (skb) { stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } } @@ -607,16 +607,16 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ data |= RCAR_CAN_RTR; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) writeb(cf->data[i], &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]); } writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id); - writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); + writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); - priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc; + priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len; 
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH); priv->tx_head++; /* Start Tx: write 0xff to the TFPCR register to increment @@ -659,18 +659,18 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv) cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK; dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc); - cf->can_dlc = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); if (data & RCAR_CAN_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { - for (dlc = 0; dlc < cf->can_dlc; dlc++) + for (dlc = 0; dlc < cf->len; dlc++) cf->data[dlc] = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]); } can_led_event(priv->ndev, CAN_LED_EVENT_RX); - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); } diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index de59dd6aad29..2778ed5c61d1 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1025,7 +1025,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, rcar_canfd_write(priv->base, RCANFD_CERFL(ch), RCANFD_CERFL_ERR(~cerfl)); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1134,7 +1134,7 @@ static void rcar_canfd_state_change(struct net_device *ndev, can_change_state(ndev, cf, tx_state, rx_state); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } } @@ -1357,7 +1357,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_RTR_FLAG) id |= RCANFD_CFID_CFRTR; - dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len)); + dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { rcar_canfd_write(priv->base, @@ -1446,9 +1446,9 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (sts & RCANFD_RFFDSTS_RFFDF) - cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_fd_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); else - cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (sts & RCANFD_RFFDSTS_RFESI) { cf->flags |= CANFD_ESI; @@ -1464,7 +1464,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0)); } } else { - cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (id & RCANFD_RFID_RFRTR) cf->can_id |= CAN_RTR_FLAG; else diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index 6e95193b215b..3c1912c0430b 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -55,7 +55,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) work_done++; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_receive_skb(skb); } @@ -157,7 +157,7 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) /* There was a problem reading the mailbox, propagate * error value. 
*/ - if (unlikely(IS_ERR(skb))) { + if (IS_ERR(skb)) { offload->dev->stats.rx_dropped++; offload->dev->stats.rx_fifo_errors++; diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 25a4d7d0b349..b6a7003c51d2 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -284,7 +284,6 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, struct sja1000_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; uint8_t fi; - uint8_t dlc; canid_t id; uint8_t dreg; u8 cmd_reg_val = 0x00; @@ -295,7 +294,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, netif_stop_queue(dev); - fi = dlc = cf->can_dlc; + fi = can_get_cc_dlc(cf, priv->can.ctrlmode); id = cf->can_id; if (id & CAN_RTR_FLAG) @@ -316,7 +315,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, priv->write_reg(priv, SJA1000_ID2, (id & 0x00000007) << 5); } - for (i = 0; i < dlc; i++) + for (i = 0; i < cf->len; i++) priv->write_reg(priv, dreg++, cf->data[i]); can_put_echo_skb(skb, dev, 0); @@ -367,11 +366,11 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, SJA1000_ID2) >> 5); } - cf->can_dlc = get_can_dlc(fi & 0x0F); + can_frame_set_cc_len(cf, fi & 0x0F, priv->can.ctrlmode); if (fi & SJA1000_FI_RTR) { id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = priv->read_reg(priv, dreg++); } @@ -381,7 +380,7 @@ static void sja1000_rx(struct net_device *dev) sja1000_write_cmdreg(priv, CMD_RRB); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -489,7 +488,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); return 0; @@ -637,7 +636,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv) CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | - CAN_CTRLMODE_PRESUME_ACK; + CAN_CTRLMODE_PRESUME_ACK | + CAN_CTRLMODE_CC_LEN8_DLC; spin_lock_init(&priv->cmdreg_lock); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b4a39f0449ba..a1bd1be09548 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -106,8 +106,8 @@ static struct net_device **slcan_devs; /* * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended - * frame format) a data length code (can_dlc) which can be from 0 to 8 - * and up to <can_dlc> data bytes as payload. + * frame format) a data length code (len) which can be from 0 to 8 + * and up to <len> data bytes as payload. * Additionally a CAN frame may become a remote transmission frame if the * RTR-bit is set. This causes another ECU to send a CAN frame with the * given can_id. 
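Across this series, drivers stop touching cf->can_dlc and the old get_can_dlc()/can_dlc2len()/can_len2dlc() helpers and switch to can_cc_dlc2len(), can_fd_dlc2len() and can_fd_len2dlc(), with the new CAN_CTRLMODE_CC_LEN8_DLC mode (see the sja1000 hunks just above) preserving raw Classical CAN DLC values 9..15. A rough sketch of the semantics these helpers are expected to have (simplified; the sketch_ prefix marks this as an illustration, not the kernel implementation):

#include <linux/kernel.h>
#include <linux/can.h>
#include <linux/can/dev.h>

/* Sketch: Classical CAN DLC handling as used by the hunks above.
 * Raw DLC values 9..15 still mean 8 data bytes on the wire; with
 * CAN_CTRLMODE_CC_LEN8_DLC the raw value is kept in cf->len8_dlc. */
static u8 sketch_can_cc_dlc2len(u8 dlc)
{
	return min_t(u8, dlc, CAN_MAX_DLEN);
}

static void sketch_can_frame_set_cc_len(struct can_frame *cf, u8 dlc, u32 ctrlmode)
{
	/* keep the raw DLC for user space if the controller supports it */
	if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && dlc > CAN_MAX_DLEN)
		cf->len8_dlc = dlc;
	cf->len = sketch_can_cc_dlc2len(dlc);
}

static u8 sketch_can_get_cc_dlc(const struct can_frame *cf, u32 ctrlmode)
{
	/* on transmit, replay a stored raw DLC instead of the plain length */
	if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
	    cf->len == CAN_MAX_DLEN && cf->len8_dlc > CAN_MAX_DLEN)
		return cf->len8_dlc;
	return cf->len;
}

Controllers that can carry a raw DLC then advertise CAN_CTRLMODE_CC_LEN8_DLC in ctrlmode_supported, as the sja1000, esd_usb2 and gs_usb hunks in this section do.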
@@ -128,10 +128,10 @@ static struct net_device **slcan_devs; * * Examples: * - * t1230 : can_id 0x123, can_dlc 0, no data - * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33 - * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55 - * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request + * t1230 : can_id 0x123, len 0, no data + * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33 + * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55 + * r1230 : can_id 0x123, len 0, no data, remote transmission request * */ @@ -156,7 +156,7 @@ static void slc_bump(struct slcan *sl) fallthrough; case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ - cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; + cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN]; sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1; @@ -167,7 +167,7 @@ static void slc_bump(struct slcan *sl) case 'T': cf.can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ - cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; + cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN]; sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1; @@ -181,15 +181,15 @@ static void slc_bump(struct slcan *sl) cf.can_id |= tmpid; - /* get can_dlc from sanitized ASCII value */ - if (cf.can_dlc >= '0' && cf.can_dlc < '9') - cf.can_dlc -= '0'; + /* get len from sanitized ASCII value */ + if (cf.len >= '0' && cf.len < '9') + cf.len -= '0'; else return; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { - for (i = 0; i < cf.can_dlc; i++) { + for (i = 0; i < cf.len; i++) { tmp = hex_to_bin(*cmd++); if (tmp < 0) return; @@ -218,7 +218,7 @@ static void slc_bump(struct slcan *sl) skb_put_data(skb, &cf, sizeof(struct can_frame)); sl->dev->stats.rx_packets++; - sl->dev->stats.rx_bytes += cf.can_dlc; + sl->dev->stats.rx_bytes += cf.len; netif_rx_ni(skb); } @@ -282,11 +282,11 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN; - *pos++ = cf->can_dlc + '0'; + *pos++ = cf->len + '0'; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) pos = hex_byte_pack_upper(pos, cf->data[i]); } @@ -304,7 +304,7 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf) actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); sl->xleft = (pos - sl->xbuff) - actual; sl->xhead = sl->xbuff + actual; - sl->dev->stats.tx_bytes += cf->can_dlc; + sl->dev->stats.tx_bytes += cf->len; } /* Write out any remaining transmit buffer. 
Scheduled when tty is writable */ diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index ccd649a8e37b..7e1536877993 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -624,7 +624,7 @@ int softing_startstop(struct net_device *dev, int up) */ memset(&msg, 0, sizeof(msg)); msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (!(bus_bitmask_start & (1 << j))) continue; diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index c9ca8b9fceb9..40070c930202 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -84,7 +84,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, if (priv->index) *ptr |= CMD_BUS2; ++ptr; - *ptr++ = cf->can_dlc; + *ptr++ = cf->len; *ptr++ = (cf->can_id >> 0); *ptr++ = (cf->can_id >> 8); if (cf->can_id & CAN_EFF_FLAG) { @@ -95,7 +95,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, ptr += 1; } if (!(cf->can_id & CAN_RTR_FLAG)) - memcpy(ptr, &cf->data[0], cf->can_dlc); + memcpy(ptr, &cf->data[0], cf->len); memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], buf, DPRAM_TX_SIZE); if (++fifo_wr >= DPRAM_TX_CNT) @@ -167,7 +167,7 @@ static int softing_handle_1(struct softing *card) iowrite8(0, &card->dpram[DPRAM_RX_LOST]); /* prepare msg */ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; /* * service to all buses, we don't know which it was applicable @@ -218,7 +218,7 @@ static int softing_handle_1(struct softing *card) state = *ptr++; msg.can_id = CAN_ERR_FLAG; - msg.can_dlc = CAN_ERR_DLC; + msg.len = CAN_ERR_DLC; if (state & SF_MASK_BUSOFF) { can_state = CAN_STATE_BUS_OFF; @@ -261,7 +261,7 @@ static int softing_handle_1(struct softing *card) } else { if (cmd & CMD_RTR) msg.can_id |= CAN_RTR_FLAG; - msg.can_dlc = get_can_dlc(*ptr++); + msg.len = can_cc_dlc2len(*ptr++); if (cmd & CMD_XTD) { msg.can_id |= CAN_EFF_FLAG; msg.can_id |= le32_to_cpup((void *)ptr); @@ -294,7 +294,7 @@ static int softing_handle_1(struct softing *card) --card->tx.pending; ++netdev->stats.tx_packets; if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.tx_bytes += msg.can_dlc; + netdev->stats.tx_bytes += msg.len; } else { int ret; @@ -302,7 +302,7 @@ static int softing_handle_1(struct softing *card) if (ret == NET_RX_SUCCESS) { ++netdev->stats.rx_packets; if (!(msg.can_id & CAN_RTR_FLAG)) - netdev->stats.rx_bytes += msg.can_dlc; + netdev->stats.rx_bytes += msg.len; } else { ++netdev->stats.rx_dropped; } diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index 73d48c3b8ded..f9455de94786 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -277,13 +277,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) ((frame->can_id & CAN_EFF_MASK) << 1) | ((frame->can_id & CAN_RTR_FLAG) ? 
1 : 0); - buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc; + buf[HI3110_FIFO_EXT_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_EXT_DATA_OFF, - frame->data, frame->can_dlc); + frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN - - (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc)); + (HI3110_CAN_MAX_DATA_LEN - frame->len)); } else { /* Standard frame */ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3; @@ -291,13 +291,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) ((frame->can_id & CAN_SFF_MASK) << 5) | ((frame->can_id & CAN_RTR_FLAG) ? (1 << 4) : 0); - buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc; + buf[HI3110_FIFO_STD_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_STD_DATA_OFF, - frame->data, frame->can_dlc); + frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN - - (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc)); + (HI3110_CAN_MAX_DATA_LEN - frame->len)); } } @@ -341,16 +341,16 @@ static void hi3110_hw_rx(struct spi_device *spi) } /* Data length */ - frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); + frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) frame->can_id |= CAN_RTR_FLAG; else memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF, - frame->can_dlc); + frame->len); priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->can_dlc; + priv->net->stats.rx_bytes += frame->len; can_led_event(priv->net, CAN_LED_EVENT_RX); @@ -585,7 +585,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws) } else { frame = (struct can_frame *)priv->tx_skb->data; hi3110_hw_tx(spi, frame); - priv->tx_len = 1 + frame->can_dlc; + priv->tx_len = 1 + frame->len; can_put_echo_skb(priv->tx_skb, net, 0); priv->tx_skb = NULL; } diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 22d814ae4edc..25859d16d06f 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -644,9 +644,9 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK); buf[TXBEID8_OFF] = GET_BYTE(eid, 1); buf[TXBEID0_OFF] = GET_BYTE(eid, 0); - buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; - memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); - mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); + buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->len; + memcpy(buf + TXBDAT_OFF, frame->data, frame->len); + mcp251x_hw_tx_frame(spi, buf, frame->len, tx_buf_idx); /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); @@ -664,7 +664,7 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, for (i = 1; i < RXBDAT_OFF; i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); - len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); + len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); for (; i < (RXBDAT_OFF + len); i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); } else { @@ -720,11 +720,11 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) frame->can_id |= CAN_RTR_FLAG; } /* Data length */ - frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); - memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc); + frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); + memcpy(frame->data, buf + RXBDAT_OFF, frame->len); priv->net->stats.rx_packets++; - priv->net->stats.rx_bytes += frame->can_dlc; + 
priv->net->stats.rx_bytes += frame->len; can_led_event(priv->net, CAN_LED_EVENT_RX); @@ -998,10 +998,10 @@ static void mcp251x_tx_work_handler(struct work_struct *ws) } else { frame = (struct can_frame *)priv->tx_skb->data; - if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN) - frame->can_dlc = CAN_FRAME_MAX_DATA_LEN; + if (frame->len > CAN_FRAME_MAX_DATA_LEN) + frame->len = CAN_FRAME_MAX_DATA_LEN; mcp251x_hw_tx(spi, frame, 0); - priv->tx_len = 1 + frame->can_dlc; + priv->tx_len = 1 + frame->len; can_put_echo_skb(priv->tx_skb, net, 0); priv->tx_skb = NULL; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 8a39be076e14..77129d5f410b 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -326,17 +326,36 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) { + struct mcp251xfd_tef_ring *tef_ring; struct mcp251xfd_tx_ring *tx_ring; struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL; struct mcp251xfd_tx_obj *tx_obj; u32 val; u16 addr; u8 len; - int i; + int i, j; /* TEF */ - priv->tef.head = 0; - priv->tef.tail = 0; + tef_ring = priv->tef; + tef_ring->head = 0; + tef_ring->tail = 0; + + /* FIFO increment TEF tail pointer */ + addr = MCP251XFD_REG_TEFCON; + val = MCP251XFD_REG_TEFCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, + addr, val, val); + + for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) { + struct spi_transfer *xfer; + + xfer = &tef_ring->uinc_xfer[j]; + xfer->tx_buf = &tef_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } /* TX */ tx_ring = priv->tx; @@ -370,6 +389,23 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv) prev_rx_ring->obj_num; prev_rx_ring = rx_ring; + + /* FIFO increment RX tail pointer */ + addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); + val = MCP251XFD_REG_FIFOCON_UINC; + len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, + addr, val, val); + + for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { + struct spi_transfer *xfer; + + xfer = &rx_ring->uinc_xfer[j]; + xfer->tx_buf = &rx_ring->uinc_buf; + xfer->len = len; + xfer->cs_change = 1; + xfer->cs_change_delay.value = 0; + xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; + } } } @@ -416,7 +452,8 @@ static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) int rx_obj_num; rx_obj_num = ram_free / rx_obj_size; - rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32); + rx_obj_num = min(1 << (fls(rx_obj_num) - 1), + MCP251XFD_RX_OBJ_NUM_MAX); rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, GFP_KERNEL); @@ -644,10 +681,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) return 0; } - if (err) - return err; - - return -ETIMEDOUT; + return err; } static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) @@ -931,7 +965,10 @@ static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) { u8 mode; - if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; + else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode = MCP251XFD_REG_CON_MODE_LISTENONLY; else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) mode = MCP251XFD_REG_CON_MODE_MIXED; @@ -1204,7 +1241,7 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv 
*priv, const u32 seq) tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? "not empty" : "empty", - seq, priv->tef.tail, priv->tef.head, tx_ring->head); + seq, priv->tef->tail, priv->tef->head, tx_ring->head); /* The Sequence Number in the TEF doesn't match our tef_tail. */ return -EAGAIN; @@ -1214,10 +1251,8 @@ static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_tef_obj *hw_tef_obj) { - struct mcp251xfd_tx_ring *tx_ring = priv->tx; struct net_device_stats *stats = &priv->ndev->stats; u32 seq, seq_masked, tef_tail_masked; - int err; seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, hw_tef_obj->flags); @@ -1228,7 +1263,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, */ seq_masked = seq & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); - tef_tail_masked = priv->tef.tail & + tef_tail_masked = priv->tef->tail & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); if (seq_masked != tef_tail_masked) return mcp251xfd_handle_tefif_recover(priv, seq); @@ -1238,18 +1273,9 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, mcp251xfd_get_tef_tail(priv), hw_tef_obj->ts); stats->tx_packets++; + priv->tef->tail++; - /* finally increment the TEF pointer */ - err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON, - GENMASK(15, 8), - MCP251XFD_REG_TEFCON_UINC); - if (err) - return err; - - priv->tef.tail++; - tx_ring->tail++; - - return mcp251xfd_check_tef_tail(priv); + return 0; } static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) @@ -1266,12 +1292,12 @@ static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) /* chip_tx_tail is the next TX-Object sent by the HW. * The new TEF head must be >= the old head, ... */ - new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail; - if (new_head <= priv->tef.head) + new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; + if (new_head <= priv->tef->head) new_head += tx_ring->obj_num; /* ... but it cannot exceed the TX head. */ - priv->tef.head = min(new_head, tx_ring->head); + priv->tef->head = min(new_head, tx_ring->head); return mcp251xfd_check_tef_tail(priv); } @@ -1336,6 +1362,40 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) } out_netif_wake_queue: + len = i; /* number of handled good TEFs */ + if (len) { + struct mcp251xfd_tef_ring *ring = priv->tef; + struct mcp251xfd_tx_ring *tx_ring = priv->tx; + struct spi_transfer *last_xfer; + + tx_ring->tail += len; + + /* Increment the TEF FIFO tail pointer 'len' times in + * a single SPI message. + */ + + /* Note: + * + * "cs_change == 1" on the last transfer results in an + * active chip select after the complete SPI + * message. This causes the controller to interpret + * the next register access as data. Temporarily set + * "cs_change" of the last transfer to "0" to properly + * deactivate the chip select at the end of the + * message. 
+ */ + last_xfer = &ring->uinc_xfer[len - 1]; + last_xfer->cs_change = 0; + err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); + last_xfer->cs_change = 1; + if (err) + return err; + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + } + mcp251xfd_ecc_tefif_successful(priv); if (mcp251xfd_get_tx_free(priv->tx)) { @@ -1405,12 +1465,12 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, cfd->flags |= CANFD_BRS; dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags); - cfd->len = can_dlc2len(get_canfd_dlc(dlc)); + cfd->len = can_fd_dlc2len(dlc); } else { if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR) cfd->can_id |= CAN_RTR_FLAG; - cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, + cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags)); } @@ -1442,13 +1502,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, if (err) stats->rx_fifo_errors++; - ring->tail++; - - /* finally increment the RX pointer */ - return regmap_update_bits(priv->map_reg, - MCP251XFD_REG_FIFOCON(ring->fifo_nr), - GENMASK(15, 8), - MCP251XFD_REG_FIFOCON_UINC); + return 0; } static inline int @@ -1480,6 +1534,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, return err; while ((len = mcp251xfd_get_rx_linear_len(ring))) { + struct spi_transfer *last_xfer; + rx_tail = mcp251xfd_get_rx_tail(ring); err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, @@ -1494,6 +1550,28 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, if (err) return err; } + + /* Increment the RX FIFO tail pointer 'len' times in a + * single SPI message. + */ + ring->tail += len; + + /* Note: + * + * "cs_change == 1" on the last transfer results in an + * active chip select after the complete SPI + * message. This causes the controller to interpret + * the next register access as data. Temporarily set + * "cs_change" of the last transfer to "0" to properly + * deactivate the chip select at the end of the + * message. + */ + last_xfer = &ring->uinc_xfer[len - 1]; + last_xfer->cs_change = 0; + err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len); + last_xfer->cs_change = 1; + if (err) + return err; } return 0; @@ -2244,7 +2322,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, * harm, only the lower 7 bits will be transferred into the * TEF object. 
*/ - dlc = can_len2dlc(cfd->len); + dlc = can_fd_len2dlc(cfd->len); flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) | FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc); @@ -2273,7 +2351,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, /* Clear data at end of CAN frame */ offset = round_down(cfd->len, sizeof(u32)); - len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset; + len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset; if (MCP251XFD_SANITIZE_CAN && len) memset(hw_tx_obj->data + offset, 0x0, len); memcpy(hw_tx_obj->data, cfd->data, cfd->len); @@ -2281,7 +2359,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv, /* Number of bytes to be written into the RAM of the controller */ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags); if (MCP251XFD_SANITIZE_CAN) - len += round_up(can_dlc2len(dlc), sizeof(u32)); + len += round_up(can_fd_dlc2len(dlc), sizeof(u32)); else len += round_up(cfd->len, sizeof(u32)); @@ -2806,9 +2884,9 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; - priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | - CAN_CTRLMODE_FD_NON_ISO; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; priv->ndev = ndev; priv->spi = spi; priv->rx_int = rx_int; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index fa1246e39980..cb6398c2a560 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -368,6 +368,7 @@ * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes */ +#define MCP251XFD_RX_OBJ_NUM_MAX 32 #define MCP251XFD_TX_OBJ_NUM_CAN 8 #define MCP251XFD_TX_OBJ_NUM_CANFD 4 @@ -458,14 +459,6 @@ struct mcp251xfd_hw_rx_obj_canfd { u8 data[sizeof_field(struct canfd_frame, data)]; }; -struct mcp251xfd_tef_ring { - unsigned int head; - unsigned int tail; - - /* u8 obj_num equals tx_ring->obj_num */ - /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ -}; - struct __packed mcp251xfd_buf_cmd { __be16 cmd; }; @@ -505,6 +498,17 @@ struct mcp251xfd_tx_obj { union mcp251xfd_tx_obj_load_buf buf; }; +struct mcp251xfd_tef_ring { + unsigned int head; + unsigned int tail; + + /* u8 obj_num equals tx_ring->obj_num */ + /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ + + union mcp251xfd_write_reg_buf uinc_buf; + struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX]; +}; + struct mcp251xfd_tx_ring { unsigned int head; unsigned int tail; @@ -527,6 +531,8 @@ struct mcp251xfd_rx_ring { u8 obj_num; u8 obj_size; + union mcp251xfd_write_reg_buf uinc_buf; + struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX]; struct mcp251xfd_hw_rx_obj_canfd obj[]; }; @@ -580,7 +586,7 @@ struct mcp251xfd_priv { struct spi_device *spi; u32 spi_max_speed_hz_orig; - struct mcp251xfd_tef_ring tef; + struct mcp251xfd_tef_ring tef[1]; struct mcp251xfd_tx_ring tx[1]; struct mcp251xfd_rx_ring *rx[1]; @@ -741,17 +747,17 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n) static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv) { - return priv->tef.head & (priv->tx->obj_num - 1); + return 
priv->tef->head & (priv->tx->obj_num - 1); } static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) { - return priv->tef.tail & (priv->tx->obj_num - 1); + return priv->tef->tail & (priv->tx->obj_num - 1); } static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv) { - return priv->tef.head - priv->tef.tail; + return priv->tef->head - priv->tef->tail; } static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv) diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index b3f2f4fe5ee0..783b63218b7b 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -424,7 +424,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d netif_stop_queue(dev); id = cf->can_id; - dlc = cf->can_dlc; + dlc = cf->len; msg_flag_n = dlc; if (id & CAN_RTR_FLAG) @@ -475,7 +475,7 @@ static void sun4i_can_rx(struct net_device *dev) return; fi = readl(priv->base + SUN4I_REG_BUF0_ADDR); - cf->can_dlc = get_can_dlc(fi & 0x0F); + cf->len = can_cc_dlc2len(fi & 0x0F); if (fi & SUN4I_MSG_EFF_FLAG) { dreg = SUN4I_REG_BUF5_ADDR; id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 21) | @@ -493,7 +493,7 @@ static void sun4i_can_rx(struct net_device *dev) if (fi & SUN4I_MSG_RTR_FLAG) id |= CAN_RTR_FLAG; else - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = readl(priv->base + dreg + i * 4); cf->can_id = id; @@ -501,7 +501,7 @@ static void sun4i_can_rx(struct net_device *dev) sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); @@ -624,7 +624,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) if (likely(skb)) { stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } else { return -ENOMEM; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 2c22f40e12bd..a6850ff0b55b 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -496,7 +496,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) spin_unlock_irqrestore(&priv->mbx_lock, flags); /* Prepare mailbox for transmission */ - data = cf->can_dlc | (get_tx_head_prio(priv) << 8); + data = cf->len | (get_tx_head_prio(priv) << 8); if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */ data |= HECC_CANMCF_RTR; hecc_write_mbx(priv, mbxno, HECC_CANMCF, data); @@ -508,7 +508,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) hecc_write_mbx(priv, mbxno, HECC_CANMID, data); hecc_write_mbx(priv, mbxno, HECC_CANMDL, be32_to_cpu(*(__be32 *)(cf->data))); - if (cf->can_dlc > 4) + if (cf->len > 4) hecc_write_mbx(priv, mbxno, HECC_CANMDH, be32_to_cpu(*(__be32 *)(cf->data + 4))); else @@ -566,11 +566,11 @@ static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload, data = hecc_read_mbx(priv, mbxno, HECC_CANMCF); if (data & HECC_CANMCF_RTR) cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = get_can_dlc(data & 0xF); + cf->len = can_cc_dlc2len(data & 0xF); data = hecc_read_mbx(priv, mbxno, HECC_CANMDL); *(__be32 *)(cf->data) = cpu_to_be32(data); - if (cf->can_dlc > 4) { + if (cf->len > 4) { data = hecc_read_mbx(priv, mbxno, HECC_CANMDH); *(__be32 *)(cf->data + 4) = cpu_to_be32(data); } diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index bcb331b0c958..c1e5d5b570b6 100644 --- a/drivers/net/can/usb/Kconfig +++ 
b/drivers/net/can/usb/Kconfig @@ -52,7 +52,9 @@ config CAN_KVASER_USB - Kvaser Leaf Light "China" - Kvaser BlackBird SemiPro - Kvaser USBcan R + - Kvaser USBcan R v2 - Kvaser Leaf Light v2 + - Kvaser Leaf Light R v2 - Kvaser Mini PCI Express HS - Kvaser Mini PCI Express 2xHS - Kvaser USBcan Light 2xHS @@ -72,6 +74,9 @@ config CAN_KVASER_USB - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 - Kvaser USBcan Pro 5xHS + - Kvaser U100 + - Kvaser U100P + - Kvaser U100S - ATI Memorator Pro 2xHS v2 - ATI USBcan Pro 2xHS v2 diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 4f52810bebf8..25eee4466364 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -306,7 +306,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) return; cf->can_id = le32_to_cpu(msg->msg.can_msg.id); - cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF); + cf->len = can_cc_dlc2len(msg->msg.can_msg.length & 0xF); if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) @@ -316,12 +316,12 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.can_msg.msg[i]; } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -396,7 +396,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -755,7 +755,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); - msg->msg.can_msg.length = cf->can_dlc; + msg->msg.can_msg.length = cf->len; if (cf->can_id & CAN_RTR_FLAG) { msg->type = cf->can_id & CAN_EFF_FLAG ? @@ -766,10 +766,10 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne msg->type = cf->can_id & CAN_EFF_FLAG ? 
CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME; - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) msg->msg.can_msg.msg[i] = cf->data[i]; - msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc; + msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len; } for (i = 0; i < MAX_TX_URBS; i++) { @@ -794,7 +794,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne context->dev = dev; context->echo_index = i; - context->dlc = cf->can_dlc; + context->dlc = cf->len; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, size, ems_usb_write_bulk_callback, context); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index b5d7ed21d7d9..9eed75a4b678 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -183,7 +183,7 @@ struct esd_usb2_net_priv; struct esd_tx_urb_context { struct esd_usb2_net_priv *priv; u32 echo_index; - int dlc; + int len; /* CAN payload length */ }; struct esd_usb2 { @@ -292,7 +292,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, priv->bec.rxerr = rxerr; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } } @@ -321,7 +321,8 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, } cf->can_id = id & ESD_IDMASK; - cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR); + can_frame_set_cc_len(cf, msg->msg.rx.dlc & ~ESD_RTR, + priv->can.ctrlmode); if (id & ESD_EXTID) cf->can_id |= CAN_EFF_FLAG; @@ -329,12 +330,12 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, if (msg->msg.rx.dlc & ESD_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.rx.data[i]; } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -355,7 +356,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv, if (!msg->msg.txdone.status) { stats->tx_packets++; - stats->tx_bytes += context->dlc; + stats->tx_bytes += context->len; can_get_echo_skb(netdev, context->echo_index); } else { stats->tx_errors++; @@ -737,7 +738,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, msg->msg.hdr.len = 3; /* minimal length */ msg->msg.hdr.cmd = CMD_CAN_TX; msg->msg.tx.net = priv->index; - msg->msg.tx.dlc = cf->can_dlc; + msg->msg.tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); if (cf->can_id & CAN_RTR_FLAG) @@ -746,10 +747,10 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_EFF_FLAG) msg->msg.tx.id |= cpu_to_le32(ESD_EXTID); - for (i = 0; i < cf->can_dlc; i++) + for (i = 0; i < cf->len; i++) msg->msg.tx.data[i] = cf->data[i]; - msg->msg.hdr.len += (cf->can_dlc + 3) >> 2; + msg->msg.hdr.len += (cf->len + 3) >> 2; for (i = 0; i < MAX_TX_URBS; i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { @@ -769,7 +770,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb, context->priv = priv; context->echo_index = i; - context->dlc = cf->can_dlc; + context->len = cf->len; /* hnd must not be 0 - MSB is stripped in txdone handling */ msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */ @@ -988,7 +989,8 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index) priv->index = index; priv->can.state = CAN_STATE_STOPPED; - priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | + 
CAN_CTRLMODE_CC_LEN8_DLC; if (le16_to_cpu(dev->udev->descriptor.idProduct) == USB_CANUSBM_PRODUCT_ID) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 018ca3b057a3..0487095e1fd0 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -9,6 +9,7 @@ * Many thanks to all socketcan devs! */ +#include <linux/ethtool.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/module.h> @@ -337,7 +338,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) cf->can_id = le32_to_cpu(hf->can_id); - cf->can_dlc = get_can_dlc(hf->can_dlc); + can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode); memcpy(cf->data, hf->data, 8); /* ERROR frames tell us information about the controller */ @@ -384,7 +385,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) goto resubmit_urb; cf->can_id |= CAN_ERR_CRTL; - cf->can_dlc = CAN_ERR_DLC; + cf->len = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; @@ -510,8 +511,9 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, cf = (struct can_frame *)skb->data; hf->can_id = cpu_to_le32(cf->can_id); - hf->can_dlc = cf->can_dlc; - memcpy(hf->data, cf->data, cf->can_dlc); + hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode); + + memcpy(hf->data, cf->data, cf->len); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT), @@ -866,7 +868,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, dev->can.bittiming_const = &dev->bt_const; dev->can.do_set_bittiming = gs_usb_set_bittiming; - dev->can.ctrlmode_supported = 0; + dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC; feature = le32_to_cpu(bt_const->feature); if (feature & GS_CAN_FEATURE_LISTEN_ONLY) diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 0f1d3e807d63..e2d58846c40c 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -58,6 +58,11 @@ #define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290 #define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291 #define USB_MINI_PCIE_2HS_PRODUCT_ID 292 +#define USB_USBCAN_R_V2_PRODUCT_ID 294 +#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295 +#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296 +#define USB_LEAF_PRODUCT_ID_END \ + USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID /* Kvaser USBCan-II devices product ids */ #define USB_USBCAN_REVB_PRODUCT_ID 2 @@ -78,13 +83,18 @@ #define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268 #define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270 +#define USB_U100_PRODUCT_ID 273 +#define USB_U100P_PRODUCT_ID 274 +#define USB_U100S_PRODUCT_ID 275 +#define USB_HYDRA_PRODUCT_ID_END \ + USB_U100S_PRODUCT_ID static inline bool kvaser_is_leaf(const struct usb_device_id *id) { return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID && id->idProduct <= USB_CAN_R_PRODUCT_ID) || (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID && - id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID); + id->idProduct <= USB_LEAF_PRODUCT_ID_END); } static inline bool kvaser_is_usbcan(const struct usb_device_id *id) @@ -96,7 +106,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) static inline bool kvaser_is_hydra(const struct usb_device_id *id) { return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID && - id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID; + id->idProduct <= USB_HYDRA_PRODUCT_ID_END; } static const struct usb_device_id kvaser_usb_table[] = { @@ -153,6 
+163,9 @@ static const struct usb_device_id kvaser_usb_table[] = { { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) }, /* USBCANII USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), @@ -177,6 +190,9 @@ static const struct usb_device_id kvaser_usb_table[] = { { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) }, { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); @@ -258,7 +274,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev) cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); return 0; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 218fadc91155..480bd2ecb296 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -34,6 +34,7 @@ /* Forward declarations */ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt; #define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82 #define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02 @@ -135,6 +136,7 @@ struct kvaser_cmd_sw_detail_req { #define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10) #define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11) #define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12) +#define KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M BIT(13) struct kvaser_cmd_sw_detail_res { __le32 sw_flags; __le32 sw_version; @@ -383,6 +385,30 @@ static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = { .brp_inc = 1, }; +static const struct can_bittiming_const kvaser_usb_hydra_rt_bittiming_c = { + .name = "kvaser_usb_rt", + .tseg1_min = 2, + .tseg1_max = 96, + .tseg2_min = 2, + .tseg2_max = 32, + .sjw_max = 32, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static const struct can_bittiming_const kvaser_usb_hydra_rtd_bittiming_c = { + .name = "kvaser_usb_rt", + .tseg1_min = 2, + .tseg1_max = 39, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + #define KVASER_USB_HYDRA_TRANSID_BITS 12 #define KVASER_USB_HYDRA_TRANSID_MASK \ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0) @@ -895,7 +921,7 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, stats = &netdev->stats; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1049,7 +1075,7 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, cf->data[7] = bec.rxerr; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); priv->bec.txerr = bec.txerr; @@ -1084,7 +1110,7 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, 
stats->tx_errors++; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1120,7 +1146,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct net_device_stats *stats = &priv->netdev->stats; stats->tx_packets++; - stats->tx_bytes += can_dlc2len(context->dlc); + stats->tx_bytes += can_fd_dlc2len(context->dlc); } spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); @@ -1180,15 +1206,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); - cf->can_dlc = get_can_dlc(cmd->rx_can.dlc); + cf->len = can_cc_dlc2len(cmd->rx_can.dlc); if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, cmd->rx_can.data, cf->can_dlc); + memcpy(cf->data, cmd->rx_can.data, cf->len); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -1251,13 +1277,13 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, kvaser_usb_can_rx_over_error(priv->netdev); if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) { - cf->len = can_dlc2len(get_canfd_dlc(dlc)); + cf->len = can_fd_dlc2len(dlc); if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS) cf->flags |= CANFD_BRS; if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) cf->flags |= CANFD_ESI; } else { - cf->len = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); } if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) @@ -1351,7 +1377,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, struct kvaser_usb *dev = priv->dev; struct kvaser_cmd_ext *cmd; struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u8 dlc = can_len2dlc(cf->len); + u8 dlc = can_fd_len2dlc(cf->len); u8 nbr_of_bytes = cf->len; u32 flags; u32 id; @@ -1434,7 +1460,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, u32 flags; u32 id; - *frame_len = cf->can_dlc; + *frame_len = cf->len; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); if (!cmd) @@ -1455,7 +1481,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, id = cf->can_id & CAN_SFF_MASK; } - cmd->tx_can.dlc = cf->can_dlc; + cmd->tx_can.dlc = cf->len; flags = (cf->can_id & CAN_EFF_FLAG ? 
KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); @@ -1727,6 +1753,8 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M) dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan; + else if (flags & KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M) + dev->cfg = &kvaser_usb_hydra_dev_cfg_rt; else dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc; @@ -2026,3 +2054,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .timestamp_freq = 1, .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c, }; + +static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { + .clock = { + .freq = 80000000, + }, + .timestamp_freq = 24, + .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, + .data_bittiming_const = &kvaser_usb_hydra_rtd_bittiming_c, +}; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 1b9957f12459..98c016ef0607 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -350,7 +350,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, u8 *cmd_tx_can_flags = NULL; /* GCC */ struct can_frame *cf = (struct can_frame *)skb->data; - *frame_len = cf->can_dlc; + *frame_len = cf->len; cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd) { @@ -383,8 +383,8 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, cmd->u.tx_can.data[1] = cf->can_id & 0x3f; } - cmd->u.tx_can.data[5] = cf->can_dlc; - memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc); + cmd->u.tx_can.data[5] = cf->len; + memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len); if (cf->can_id & CAN_RTR_FLAG) *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; @@ -576,7 +576,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, cf->can_id |= CAN_ERR_RESTARTED; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } else { netdev_err(priv->netdev, @@ -694,7 +694,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, { struct can_frame *cf; struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, - .can_dlc = CAN_ERR_DLC }; + .len = CAN_ERR_DLC }; struct sk_buff *skb; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; @@ -778,7 +778,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, cf->data[7] = es->rxerr; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -978,13 +978,13 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, else cf->can_id &= CAN_SFF_MASK; - cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc); + cf->len = can_cc_dlc2len(cmd->u.leaf.log_message.dlc); if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &cmd->u.leaf.log_message.data, - cf->can_dlc); + cf->len); } else { cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); @@ -996,16 +996,16 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, cf->can_id |= CAN_EFF_FLAG; } - cf->can_dlc = get_can_dlc(rx_data[5]); + cf->len = can_cc_dlc2len(rx_data[5]); if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, &rx_data[6], cf->can_dlc); + memcpy(cf->data, &rx_data[6], cf->len); } stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } diff --git a/drivers/net/can/usb/mcba_usb.c 
b/drivers/net/can/usb/mcba_usb.c index e97f2e0da6b0..df54eb7d4b36 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -184,7 +184,7 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv, if (cf) { ctx->can = true; - ctx->dlc = cf->can_dlc; + ctx->dlc = cf->len; } else { ctx->can = false; ctx->dlc = 0; @@ -348,7 +348,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, usb_msg.eid = 0; } - usb_msg.dlc = cf->can_dlc; + usb_msg.dlc = cf->len; memcpy(usb_msg.data, cf->data, usb_msg.dlc); @@ -451,12 +451,12 @@ static void mcba_usb_process_can(struct mcba_priv *priv, if (msg->dlc & MCBA_DLC_RTR_MASK) cf->can_id |= CAN_RTR_FLAG; - cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK); + cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK); - memcpy(cf->data, msg->data, cf->can_dlc); + memcpy(cf->data, msg->data, cf->len); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; can_led_event(priv->netdev, CAN_LED_EVENT_RX); netif_rx(skb); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 63bd2ed96697..e6c1e5d33924 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -596,7 +596,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, } mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->can_dlc; + mc->netdev->stats.rx_bytes += cf->len; netif_rx(skb); return 0; @@ -734,7 +734,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) cf->can_id = le16_to_cpu(tmp16) >> 5; } - cf->can_dlc = get_can_dlc(rec_len); + can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); /* Only first packet timestamp is a word */ if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) @@ -751,7 +751,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) if ((mc->ptr + rec_len) > mc->end) goto decode_failed; - memcpy(cf->data, mc->ptr, cf->can_dlc); + memcpy(cf->data, mc->ptr, cf->len); mc->ptr += rec_len; } @@ -761,7 +761,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) /* update statistics */ mc->netdev->stats.rx_packets++; - mc->netdev->stats.rx_bytes += cf->can_dlc; + mc->netdev->stats.rx_bytes += cf->len; /* push the skb */ netif_rx(skb); @@ -838,7 +838,8 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, pc = obuf + PCAN_USB_MSG_HEADER_LEN; /* status/len byte */ - *pc = cf->can_dlc; + *pc = can_get_cc_dlc(cf, dev->can.ctrlmode); + if (cf->can_id & CAN_RTR_FLAG) *pc |= PCAN_USB_STATUSLEN_RTR; @@ -858,8 +859,8 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, /* can data */ if (!(cf->can_id & CAN_RTR_FLAG)) { - memcpy(pc, cf->data, cf->can_dlc); - pc += cf->can_dlc; + memcpy(pc, cf->data, cf->len); + pc += cf->len; } obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff); @@ -992,7 +993,8 @@ const struct peak_usb_adapter pcan_usb = { .device_id = PCAN_USB_PRODUCT_ID, .ctrl_count = 1, .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_BERR_REPORTING, + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_USB_CRYSTAL_HZ / 2 , }, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 204ccb27d6d9..251835ea15aa 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ 
-295,15 +295,16 @@ static void peak_usb_write_bulk_callback(struct urb *urb) netif_trans_update(netdev); break; - default: - if (net_ratelimit()) - netdev_err(netdev, "Tx urb aborted (%d)\n", - urb->status); case -EPROTO: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: + break; + default: + if (net_ratelimit()) + netdev_err(netdev, "Tx urb aborted (%d)\n", - urb->status); + urb->status); break; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index d29d20525588..61631f4fd92a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -492,14 +492,16 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) cfd->flags |= CANFD_ESI; - cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm))); + cfd->len = can_fd_dlc2len(pucan_msg_get_dlc(rm)); } else { /* CAN 2.0 frame case */ skb = alloc_can_skb(netdev, (struct can_frame **)&cfd); if (!skb) return -ENOMEM; - cfd->len = get_can_dlc(pucan_msg_get_dlc(rm)); + can_frame_set_cc_len((struct can_frame *)cfd, + pucan_msg_get_dlc(rm), + dev->can.ctrlmode); } cfd->can_id = le32_to_cpu(rm->can_id); @@ -581,7 +583,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); netdev->stats.rx_packets++; - netdev->stats.rx_bytes += cf->can_dlc; + netdev->stats.rx_bytes += cf->len; return 0; } @@ -737,7 +739,7 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u16 tx_msg_size, tx_msg_flags; - u8 can_dlc; + u8 dlc; if (cfd->len > CANFD_MAX_DLEN) return -EINVAL; @@ -756,7 +758,7 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, if (can_is_canfd_skb(skb)) { /* considering a CANFD frame */ - can_dlc = can_len2dlc(cfd->len); + dlc = can_fd_len2dlc(cfd->len); tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN; @@ -767,14 +769,15 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frames */ - can_dlc = cfd->len; + dlc = can_get_cc_dlc((struct can_frame *)cfd, + dev->can.ctrlmode); if (cfd->can_id & CAN_RTR_FLAG) tx_msg_flags |= PUCAN_MSG_RTR; } tx_msg->flags = cpu_to_le16(tx_msg_flags); - tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc); + tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); memcpy(tx_msg->d, cfd->data, cfd->len); /* add null size message to tag the end (messages are 32-bits aligned) @@ -1036,7 +1039,8 @@ const struct peak_usb_adapter pcan_usb_fd = { .device_id = PCAN_USBFD_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1108,7 +1112,8 @@ const struct peak_usb_adapter pcan_usb_chip = { .device_id = PCAN_USBCHIP_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1180,7 +1185,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { .device_id = PCAN_USBPROFD_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = 
CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, @@ -1252,7 +1258,8 @@ const struct peak_usb_adapter pcan_usb_x6 = { .device_id = PCAN_USBX6_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | - CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index c7564773fb2b..275087c39602 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -532,7 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, return -ENOMEM; can_frame->can_id = le32_to_cpu(rx->id); - can_frame->can_dlc = rx->len & 0x0f; + can_frame->len = rx->len & 0x0f; if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; @@ -540,14 +540,14 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, if (rx->flags & PCAN_USBPRO_RTR) can_frame->can_id |= CAN_RTR_FLAG; else - memcpy(can_frame->data, rx->data, can_frame->can_dlc); + memcpy(can_frame->data, rx->data, can_frame->len); hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32), &hwts->hwtstamp); netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->can_dlc; + netdev->stats.rx_bytes += can_frame->len; netif_rx(skb); return 0; @@ -662,7 +662,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp); netdev->stats.rx_packets++; - netdev->stats.rx_bytes += can_frame->can_dlc; + netdev->stats.rx_bytes += can_frame->len; netif_rx(skb); return 0; @@ -767,14 +767,14 @@ static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, pcan_msg_init_empty(&usb_msg, obuf, *size); - if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0)) + if ((cf->can_id & CAN_RTR_FLAG) || (cf->len == 0)) data_type = PCAN_USBPRO_TXMSG0; - else if (cf->can_dlc <= 4) + else if (cf->len <= 4) data_type = PCAN_USBPRO_TXMSG4; else data_type = PCAN_USBPRO_TXMSG8; - len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f); + len = (dev->ctrl_idx << 4) | (cf->len & 0x0f); flags = 0; if (cf->can_id & CAN_EFF_FLAG) diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index dc5290b36598..7d92da8954fe 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -303,12 +303,12 @@ struct ucan_priv { struct ucan_urb_context *context_array; }; -static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len) +static u8 ucan_can_cc_dlc2len(struct ucan_can_msg *msg, u16 len) { if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) - return get_can_dlc(msg->dlc); + return can_cc_dlc2len(msg->dlc); else - return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); + return can_cc_dlc2len(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); } static void ucan_release_context_array(struct ucan_priv *up) @@ -614,15 +614,15 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) cf->can_id = canid; /* compute DLC taking RTR_FLAG into account */ - cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len); + cf->len = ucan_can_cc_dlc2len(&m->msg.can_msg, len); /* copy the payload of non RTR frames */ if (!(cf->can_id 
& CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) - memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc); + memcpy(cf->data, m->msg.can_msg.data, cf->len); /* don't count error frames as real packets */ stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; /* pass it to Linux */ netif_rx(skb); @@ -1078,15 +1078,15 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up, mlen = UCAN_OUT_HDR_SIZE + offsetof(struct ucan_can_msg, dlc) + sizeof(m->msg.can_msg.dlc); - m->msg.can_msg.dlc = cf->can_dlc; + m->msg.can_msg.dlc = cf->len; } else { mlen = UCAN_OUT_HDR_SIZE + - sizeof(m->msg.can_msg.id) + cf->can_dlc; - memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc); + sizeof(m->msg.can_msg.id) + cf->len; + memcpy(m->msg.can_msg.data, cf->data, cf->len); } m->len = cpu_to_le16(mlen); - context->dlc = cf->can_dlc; + context->dlc = cf->len; m->subtype = echo_index; diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 62749c67c959..44478304ff46 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -449,7 +449,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, priv->bec.rxerr = rxerr; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } @@ -470,7 +470,7 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, return; cf->can_id = be32_to_cpu(msg->id); - cf->can_dlc = get_can_dlc(msg->dlc & 0xF); + can_frame_set_cc_len(cf, msg->dlc & 0xF, priv->can.ctrlmode); if (msg->flags & USB_8DEV_EXTID) cf->can_id |= CAN_EFF_FLAG; @@ -478,10 +478,10 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, if (msg->flags & USB_8DEV_RTR) cf->can_id |= CAN_RTR_FLAG; else - memcpy(cf->data, msg->data, cf->can_dlc); + memcpy(cf->data, msg->data, cf->len); stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); can_led_event(priv->netdev, CAN_LED_EVENT_RX); @@ -637,8 +637,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, msg->flags |= USB_8DEV_EXTID; msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK); - msg->dlc = cf->can_dlc; - memcpy(msg->data, cf->data, cf->can_dlc); + msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(msg->data, cf->data, cf->len); msg->end = USB_8DEV_DATA_END; for (i = 0; i < MAX_TX_URBS; i++) { @@ -656,7 +656,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, context->priv = priv; context->echo_index = i; - context->dlc = cf->can_dlc; + context->dlc = cf->len; usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), @@ -928,7 +928,8 @@ static int usb_8dev_probe(struct usb_interface *intf, priv->can.do_get_berr_counter = usb_8dev_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - CAN_CTRLMODE_ONE_SHOT; + CAN_CTRLMODE_ONE_SHOT | + CAN_CTRLMODE_CC_LEN8_DLC; netdev->netdev_ops = &usb_8dev_netdev_ops; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index d6ba9426be4d..fa47bab510bb 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -186,7 +186,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, } if (ifmp && tbp[IFLA_IFNAME]) { - nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); @@ -223,7 +223,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, /* register first device */ if 
(tb[IFLA_IFNAME]) - nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 48d746e18f30..3f54edee92eb 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -583,7 +583,7 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, id |= XCAN_IDR_SRR_MASK; } - dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; + dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) dlc |= XCAN_DLCR_BRS_MASK; @@ -759,7 +759,7 @@ static int xcan_rx(struct net_device *ndev, int frame_base) XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ - cf->can_dlc = get_can_dlc(dlc); + cf->len = can_cc_dlc2len(dlc); /* Change Xilinx CAN ID format to socketCAN ID format */ if (id_xcan & XCAN_IDR_IDE_MASK) { @@ -784,13 +784,13 @@ static int xcan_rx(struct net_device *ndev, int frame_base) if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ - if (cf->can_dlc > 0) + if (cf->len > 0) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); - if (cf->can_dlc > 4) + if (cf->len > 4) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); } - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); @@ -832,10 +832,10 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base) * format */ if (dlc & XCAN_DLCR_EDL_MASK) - cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> + cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT); else - cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >> + cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT); /* Change Xilinx CAN ID format to socketCAN ID format */ @@ -970,7 +970,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) struct net_device_stats *stats = &ndev->stats; stats->rx_packets++; - stats->rx_bytes += cf->can_dlc; + stats->rx_bytes += cf->len; netif_rx(skb); } } diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2451f61a38e4..f6a0488589fc 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -24,6 +24,8 @@ config NET_DSA_LOOP This enables support for a fake mock-up switch chip which exercises the DSA APIs. 
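[Editor's note] The CAN hunks above all apply one API change: the Classical CAN frame field can_dlc is renamed to len and now strictly holds the payload length in bytes, the helpers get_can_dlc()/can_dlc2len()/can_len2dlc() become can_cc_dlc2len()/can_fd_dlc2len()/can_fd_len2dlc(), and controllers that can transport raw DLC values 9..15 opt in via the new CAN_CTRLMODE_CC_LEN8_DLC flag together with can_frame_set_cc_len()/can_get_cc_dlc(). The sketch below illustrates the intended semantics only; it mirrors the helpers used in these hunks rather than reproducing the kernel implementation, and the len8_dlc member is assumed to exist as in the v5.11 CAN core. (The vxcan hunks above also swap nla_strlcpy() for nla_strscpy(), which returns the copied length or -E2BIG instead of the would-be source length.)

	/* Editorial sketch, not kernel code: Classical CAN DLC handling.
	 * On the wire a DLC of 9..15 still means 8 data bytes, so len is
	 * capped at CAN_MAX_DLEN while the raw DLC may be kept in len8_dlc.
	 */
	#include <linux/can.h>
	#include <linux/can/dev.h>

	static u8 sketch_cc_dlc2len(u8 dlc)
	{
		return dlc > CAN_MAX_DLEN ? CAN_MAX_DLEN : dlc;	/* 9..15 -> 8 */
	}

	static void sketch_frame_set_cc_len(struct can_frame *cf, u8 dlc,
					    u32 ctrlmode)
	{
		/* preserve the raw DLC only when CC_LEN8_DLC is enabled */
		if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && dlc > CAN_MAX_DLEN)
			cf->len8_dlc = dlc;

		cf->len = sketch_cc_dlc2len(dlc);
	}

	static u8 sketch_get_cc_dlc(const struct can_frame *cf, u32 ctrlmode)
	{
		/* hand a stored raw DLC back to the hardware, if plausible */
		if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
		    cf->len == CAN_MAX_DLEN && cf->len8_dlc > CAN_MAX_DLEN)
			return cf->len8_dlc;

		return cf->len;
	}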
+source "drivers/net/dsa/hirschmann/Kconfig" + config NET_DSA_LANTIQ_GSWIP tristate "Lantiq / Intel GSWIP" depends on HAS_IOMEM && NET_DSA diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 4a943ccc2ca4..a84adb140a04 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o obj-y += b53/ +obj-y += hirschmann/ obj-y += microchip/ obj-y += mv88e6xxx/ obj-y += ocelot/ diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig new file mode 100644 index 000000000000..222dd35e2c9d --- /dev/null +++ b/drivers/net/dsa/hirschmann/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +config NET_DSA_HIRSCHMANN_HELLCREEK + tristate "Hirschmann Hellcreek TSN Switch support" + depends on HAS_IOMEM + depends on NET_DSA + depends on PTP_1588_CLOCK + select NET_DSA_TAG_HELLCREEK + help + This driver adds support for Hirschmann Hellcreek TSN switches. diff --git a/drivers/net/dsa/hirschmann/Makefile b/drivers/net/dsa/hirschmann/Makefile new file mode 100644 index 000000000000..f4075c2998b5 --- /dev/null +++ b/drivers/net/dsa/hirschmann/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK) += hellcreek_sw.o +hellcreek_sw-objs := hellcreek.o +hellcreek_sw-objs += hellcreek_ptp.o +hellcreek_sw-objs += hellcreek_hwtstamp.o diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c new file mode 100644 index 000000000000..6420b76ea37c --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: (GPL-2.0 or MIT) +/* + * DSA driver for: + * Hirschmann Hellcreek TSN switch. 
+ * + * Copyright (C) 2019,2020 Linutronix GmbH + * Author Kurt Kanzenbach <kurt@linutronix.de> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> +#include <linux/bitops.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/etherdevice.h> +#include <linux/random.h> +#include <linux/iopoll.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <net/dsa.h> + +#include "hellcreek.h" +#include "hellcreek_ptp.h" +#include "hellcreek_hwtstamp.h" + +static const struct hellcreek_counter hellcreek_counter[] = { + { 0x00, "RxFiltered", }, + { 0x01, "RxOctets1k", }, + { 0x02, "RxVTAG", }, + { 0x03, "RxL2BAD", }, + { 0x04, "RxOverloadDrop", }, + { 0x05, "RxUC", }, + { 0x06, "RxMC", }, + { 0x07, "RxBC", }, + { 0x08, "RxRS<64", }, + { 0x09, "RxRS64", }, + { 0x0a, "RxRS65_127", }, + { 0x0b, "RxRS128_255", }, + { 0x0c, "RxRS256_511", }, + { 0x0d, "RxRS512_1023", }, + { 0x0e, "RxRS1024_1518", }, + { 0x0f, "RxRS>1518", }, + { 0x10, "TxTailDropQueue0", }, + { 0x11, "TxTailDropQueue1", }, + { 0x12, "TxTailDropQueue2", }, + { 0x13, "TxTailDropQueue3", }, + { 0x14, "TxTailDropQueue4", }, + { 0x15, "TxTailDropQueue5", }, + { 0x16, "TxTailDropQueue6", }, + { 0x17, "TxTailDropQueue7", }, + { 0x18, "RxTrafficClass0", }, + { 0x19, "RxTrafficClass1", }, + { 0x1a, "RxTrafficClass2", }, + { 0x1b, "RxTrafficClass3", }, + { 0x1c, "RxTrafficClass4", }, + { 0x1d, "RxTrafficClass5", }, + { 0x1e, "RxTrafficClass6", }, + { 0x1f, "RxTrafficClass7", }, + { 0x21, "TxOctets1k", }, + { 0x22, "TxVTAG", }, + { 0x23, "TxL2BAD", }, + { 0x25, "TxUC", }, + { 0x26, "TxMC", }, + { 0x27, "TxBC", }, + { 0x28, "TxTS<64", }, + { 0x29, "TxTS64", }, + { 0x2a, "TxTS65_127", }, + { 0x2b, "TxTS128_255", }, + { 0x2c, "TxTS256_511", }, + { 0x2d, "TxTS512_1023", }, + { 0x2e, "TxTS1024_1518", }, + { 0x2f, "TxTS>1518", }, + { 0x30, "TxTrafficClassOverrun0", }, + { 0x31, "TxTrafficClassOverrun1", }, + { 0x32, "TxTrafficClassOverrun2", }, + { 0x33, "TxTrafficClassOverrun3", }, + { 0x34, "TxTrafficClassOverrun4", }, + { 0x35, "TxTrafficClassOverrun5", }, + { 0x36, "TxTrafficClassOverrun6", }, + { 0x37, "TxTrafficClassOverrun7", }, + { 0x38, "TxTrafficClass0", }, + { 0x39, "TxTrafficClass1", }, + { 0x3a, "TxTrafficClass2", }, + { 0x3b, "TxTrafficClass3", }, + { 0x3c, "TxTrafficClass4", }, + { 0x3d, "TxTrafficClass5", }, + { 0x3e, "TxTrafficClass6", }, + { 0x3f, "TxTrafficClass7", }, +}; + +static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset) +{ + return readw(hellcreek->base + offset); +} + +static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek) +{ + return readw(hellcreek->base + HR_CTRL_C); +} + +static u16 hellcreek_read_stat(struct hellcreek *hellcreek) +{ + return readw(hellcreek->base + HR_SWSTAT); +} + +static void hellcreek_write(struct hellcreek *hellcreek, u16 data, + unsigned int offset) +{ + writew(data, hellcreek->base + offset); +} + +static void hellcreek_select_port(struct hellcreek *hellcreek, int port) +{ + u16 val = port << HR_PSEL_PTWSEL_SHIFT; + + hellcreek_write(hellcreek, val, HR_PSEL); +} + +static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio) +{ + u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT; + + hellcreek_write(hellcreek, val, HR_PSEL); +} + +static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter) +{ + u16 val = counter << HR_CSEL_SHIFT; + + hellcreek_write(hellcreek, val, 
HR_CSEL); + + /* Data sheet states to wait at least 20 internal clock cycles */ + ndelay(200); +} + +static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid, + bool pvid) +{ + u16 val = 0; + + /* Set pvid bit first */ + if (pvid) + val |= HR_VIDCFG_PVID; + hellcreek_write(hellcreek, val, HR_VIDCFG); + + /* Set vlan */ + val |= vid << HR_VIDCFG_VID_SHIFT; + hellcreek_write(hellcreek, val, HR_VIDCFG); +} + +static int hellcreek_wait_until_ready(struct hellcreek *hellcreek) +{ + u16 val; + + /* Wait up to 1ms, although 3 us should be enough */ + return readx_poll_timeout(hellcreek_read_ctrl, hellcreek, + val, val & HR_CTRL_C_READY, + 3, 1000); +} + +static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek) +{ + u16 val; + + return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek, + val, !(val & HR_CTRL_C_TRANSITION), + 1, 1000); +} + +static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek) +{ + u16 val; + + return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek, + val, !(val & HR_SWSTAT_BUSY), + 1, 1000); +} + +static int hellcreek_detect(struct hellcreek *hellcreek) +{ + u16 id, rel_low, rel_high, date_low, date_high, tgd_ver; + u8 tgd_maj, tgd_min; + u32 rel, date; + + id = hellcreek_read(hellcreek, HR_MODID_C); + rel_low = hellcreek_read(hellcreek, HR_REL_L_C); + rel_high = hellcreek_read(hellcreek, HR_REL_H_C); + date_low = hellcreek_read(hellcreek, HR_BLD_L_C); + date_high = hellcreek_read(hellcreek, HR_BLD_H_C); + tgd_ver = hellcreek_read(hellcreek, TR_TGDVER); + + if (id != hellcreek->pdata->module_id) + return -ENODEV; + + rel = rel_low | (rel_high << 16); + date = date_low | (date_high << 16); + tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT; + tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT; + + dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n", + id, rel, date, tgd_maj, tgd_min); + + return 0; +} + +static void hellcreek_feature_detect(struct hellcreek *hellcreek) +{ + u16 features; + + features = hellcreek_read(hellcreek, HR_FEABITS0); + + /* Currently we only detect the size of the FDB table */ + hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >> + HR_FEABITS0_FDBBINS_SHIFT) * 32; + + dev_info(hellcreek->dev, "Feature detect: FDB entries=%zu\n", + hellcreek->fdb_entries); +} + +static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol mp) +{ + return DSA_TAG_PROTO_HELLCREEK; +} + +static int hellcreek_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "Enable port %d\n", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + val |= HR_PTCFG_ADMIN_EN; + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); + + return 0; +} + +static void hellcreek_port_disable(struct dsa_switch *ds, int port) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "Disable port %d\n", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + val &= ~HR_PTCFG_ADMIN_EN; + hellcreek_write(hellcreek, 
val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) { + const struct hellcreek_counter *counter = &hellcreek_counter[i]; + + strlcpy(data + i * ETH_GSTRING_LEN, + counter->name, ETH_GSTRING_LEN); + } +} + +static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(hellcreek_counter); +} + +static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port *hellcreek_port; + int i; + + hellcreek_port = &hellcreek->ports[port]; + + for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) { + const struct hellcreek_counter *counter = &hellcreek_counter[i]; + u8 offset = counter->offset + port * 64; + u16 high, low; + u64 value; + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_counter(hellcreek, offset); + + /* The registers are locked internally by selecting the + * counter. So low and high can be read without reading high + * again. + */ + high = hellcreek_read(hellcreek, HR_CRDH); + low = hellcreek_read(hellcreek, HR_CRDL); + value = ((u64)high << 16) | low; + + hellcreek_port->counter_values[i] += value; + data[i] = hellcreek_port->counter_values[i]; + + mutex_unlock(&hellcreek->reg_lock); + } +} + +static u16 hellcreek_private_vid(int port) +{ + return VLAN_N_VID - port + 1; +} + +static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct hellcreek *hellcreek = ds->priv; + int i; + + dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port); + + /* Restriction: Make sure that nobody uses the "private" VLANs. These + * VLANs are internally used by the driver to ensure port + * separation. Thus, they cannot be used by someone else. 
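+ *
+ * (Editorial example, not part of the original patch: with
+ * hellcreek_private_vid() above and VLAN_N_VID == 4096, user port 2 owns
+ * VID 4095 and user port 3 owns VID 4094, so a configuration request
+ * covering one of those VIDs is refused with -EBUSY below.)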
+ */ + for (i = 0; i < hellcreek->pdata->num_ports; ++i) { + const u16 restricted_vid = hellcreek_private_vid(i); + u16 vid; + + if (!dsa_is_user_port(ds, i)) + continue; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + if (vid == restricted_vid) + return -EBUSY; + } + + return 0; +} + +static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port, + int *shift, int *mask) +{ + switch (port) { + case 0: + *shift = HR_VIDMBRCFG_P0MBR_SHIFT; + *mask = HR_VIDMBRCFG_P0MBR_MASK; + break; + case 1: + *shift = HR_VIDMBRCFG_P1MBR_SHIFT; + *mask = HR_VIDMBRCFG_P1MBR_MASK; + break; + case 2: + *shift = HR_VIDMBRCFG_P2MBR_SHIFT; + *mask = HR_VIDMBRCFG_P2MBR_MASK; + break; + case 3: + *shift = HR_VIDMBRCFG_P3MBR_SHIFT; + *mask = HR_VIDMBRCFG_P3MBR_MASK; + break; + default: + *shift = *mask = 0; + dev_err(hellcreek->dev, "Unknown port %d selected!\n", port); + } +} + +static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid, + bool pvid, bool untagged) +{ + int shift, mask; + u16 val; + + dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d", + port, vid, pvid, untagged); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + hellcreek_select_vlan(hellcreek, vid, pvid); + + /* Setup port vlan membership */ + hellcreek_select_vlan_params(hellcreek, port, &shift, &mask); + val = hellcreek->vidmbrcfg[vid]; + val &= ~mask; + if (untagged) + val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift; + else + val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift; + + hellcreek_write(hellcreek, val, HR_VIDMBRCFG); + hellcreek->vidmbrcfg[vid] = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port, + u16 vid) +{ + int shift, mask; + u16 val; + + dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_vlan(hellcreek, vid, 0); + + /* Setup port vlan membership */ + hellcreek_select_vlan_params(hellcreek, port, &shift, &mask); + val = hellcreek->vidmbrcfg[vid]; + val &= ~mask; + val |= HELLCREEK_VLAN_NO_MEMBER << shift; + + hellcreek_write(hellcreek, val, HR_VIDMBRCFG); + hellcreek->vidmbrcfg[vid] = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct hellcreek *hellcreek = ds->priv; + u16 vid; + + dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n", + vlan->vid_begin, vlan->vid_end, port, + untagged ? "untagged" : "tagged", + pvid ? 
"PVID" : "no PVID"); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged); +} + +static int hellcreek_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct hellcreek *hellcreek = ds->priv; + u16 vid; + + dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n", + vlan->vid_begin, vlan->vid_end, port); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + hellcreek_unapply_vlan(hellcreek, port, vid); + + return 0; +} + +static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port *hellcreek_port; + const char *new_state; + u16 val; + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_port = &hellcreek->ports[port]; + val = hellcreek_port->ptcfg; + + switch (state) { + case BR_STATE_DISABLED: + new_state = "DISABLED"; + val |= HR_PTCFG_BLOCKED; + val &= ~HR_PTCFG_LEARNING_EN; + break; + case BR_STATE_BLOCKING: + new_state = "BLOCKING"; + val |= HR_PTCFG_BLOCKED; + val &= ~HR_PTCFG_LEARNING_EN; + break; + case BR_STATE_LISTENING: + new_state = "LISTENING"; + val |= HR_PTCFG_BLOCKED; + val &= ~HR_PTCFG_LEARNING_EN; + break; + case BR_STATE_LEARNING: + new_state = "LEARNING"; + val |= HR_PTCFG_BLOCKED; + val |= HR_PTCFG_LEARNING_EN; + break; + case BR_STATE_FORWARDING: + new_state = "FORWARDING"; + val &= ~HR_PTCFG_BLOCKED; + val |= HR_PTCFG_LEARNING_EN; + break; + default: + new_state = "UNKNOWN"; + } + + hellcreek_select_port(hellcreek, port); + hellcreek_write(hellcreek, val, HR_PTCFG); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); + + dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n", + port, new_state); +} + +static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port, + bool enable) +{ + struct hellcreek_port *hellcreek_port = &hellcreek->ports[port]; + u16 ptcfg; + + mutex_lock(&hellcreek->reg_lock); + + ptcfg = hellcreek_port->ptcfg; + + if (enable) + ptcfg |= HR_PTCFG_INGRESSFLT; + else + ptcfg &= ~HR_PTCFG_INGRESSFLT; + + hellcreek_select_port(hellcreek, port); + hellcreek_write(hellcreek, ptcfg, HR_PTCFG); + hellcreek_port->ptcfg = ptcfg; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek, + bool enable) +{ + u16 swcfg; + + mutex_lock(&hellcreek->reg_lock); + + swcfg = hellcreek->swcfg; + + if (enable) + swcfg |= HR_SWCFG_VLAN_UNAWARE; + else + swcfg &= ~HR_SWCFG_VLAN_UNAWARE; + + hellcreek_write(hellcreek, swcfg, HR_SWCFG); + + mutex_unlock(&hellcreek->reg_lock); +} + +/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. 
*/ +static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port, + bool enabled) +{ + const u16 vid = hellcreek_private_vid(port); + int upstream = dsa_upstream_port(ds, port); + struct hellcreek *hellcreek = ds->priv; + + /* Apply vid to port as egress untagged and port vlan id */ + if (enabled) + hellcreek_apply_vlan(hellcreek, port, vid, true, true); + else + hellcreek_unapply_vlan(hellcreek, port, vid); + + /* Apply vid to cpu port as well */ + if (enabled) + hellcreek_apply_vlan(hellcreek, upstream, vid, false, true); + else + hellcreek_unapply_vlan(hellcreek, upstream, vid); +} + +static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *br) +{ + struct hellcreek *hellcreek = ds->priv; + + dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port); + + /* When joining a vlan_filtering bridge, keep the switch VLAN aware */ + if (!ds->vlan_filtering) + hellcreek_setup_vlan_awareness(hellcreek, false); + + /* Drop private vlans */ + hellcreek_setup_vlan_membership(ds, port, false); + + return 0; +} + +static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) +{ + struct hellcreek *hellcreek = ds->priv; + + dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port); + + /* Enable VLAN awareness */ + hellcreek_setup_vlan_awareness(hellcreek, true); + + /* Enable private vlans */ + hellcreek_setup_vlan_membership(ds, port, true); +} + +static int __hellcreek_fdb_add(struct hellcreek *hellcreek, + const struct hellcreek_fdb_entry *entry) +{ + u16 meta = 0; + + dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, " + "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask, + entry->is_obt, entry->reprio_en, entry->reprio_tc); + + /* Add mac address */ + hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH); + hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM); + hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL); + + /* Meta data */ + meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT; + if (entry->is_obt) + meta |= HR_FDBWRM0_OBT; + if (entry->reprio_en) { + meta |= HR_FDBWRM0_REPRIO_EN; + meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT; + } + hellcreek_write(hellcreek, meta, HR_FDBWRM0); + + /* Commit */ + hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD); + + /* Wait until done */ + return hellcreek_wait_fdb_ready(hellcreek); +} + +static int __hellcreek_fdb_del(struct hellcreek *hellcreek, + const struct hellcreek_fdb_entry *entry) +{ + dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac); + + /* Delete by matching idx */ + hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD); + + /* Wait until done */ + return hellcreek_wait_fdb_ready(hellcreek); +} + +/* Retrieve the index of a FDB entry by mac address. Currently we search through + * the complete table in hardware. If that's too slow, we might have to cache + * the complete FDB table in software. + */ +static int hellcreek_fdb_get(struct hellcreek *hellcreek, + const unsigned char *dest, + struct hellcreek_fdb_entry *entry) +{ + size_t i; + + /* Set read pointer to zero: The read of HR_FDBMAX (read-only register) + * should reset the internal pointer. But, that doesn't work. The vendor + * suggested a subsequent write as workaround. Same for HR_FDBRDH below. 
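+ *
+ * (Editorial sketch of the resulting access pattern, not part of the
+ * original patch: read HR_FDBMAX while discarding the value, write 0 back
+ * to reset the read pointer, then per entry read HR_FDBMDRD plus
+ * HR_FDBRDL/M/H and write 0 to HR_FDBRDH to advance to the next entry.)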
+ */ + hellcreek_read(hellcreek, HR_FDBMAX); + hellcreek_write(hellcreek, 0x00, HR_FDBMAX); + + /* We have to read the complete table, because the switch/driver might + * enter new entries anywhere. + */ + for (i = 0; i < hellcreek->fdb_entries; ++i) { + unsigned char addr[ETH_ALEN]; + u16 meta, mac; + + meta = hellcreek_read(hellcreek, HR_FDBMDRD); + mac = hellcreek_read(hellcreek, HR_FDBRDL); + addr[5] = mac & 0xff; + addr[4] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDM); + addr[3] = mac & 0xff; + addr[2] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDH); + addr[1] = mac & 0xff; + addr[0] = (mac & 0xff00) >> 8; + + /* Force next entry */ + hellcreek_write(hellcreek, 0x00, HR_FDBRDH); + + if (memcmp(addr, dest, ETH_ALEN)) + continue; + + /* Match found */ + entry->idx = i; + entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> + HR_FDBMDRD_PORTMASK_SHIFT; + entry->age = (meta & HR_FDBMDRD_AGE_MASK) >> + HR_FDBMDRD_AGE_SHIFT; + entry->is_obt = !!(meta & HR_FDBMDRD_OBT); + entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED); + entry->is_static = !!(meta & HR_FDBMDRD_STATIC); + entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >> + HR_FDBMDRD_REPRIO_TC_SHIFT; + entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN); + memcpy(entry->mac, addr, sizeof(addr)); + + return 0; + } + + return -ENOENT; +} + +static int hellcreek_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct hellcreek_fdb_entry entry = { 0 }; + struct hellcreek *hellcreek = ds->priv; + int ret; + + dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr); + + mutex_lock(&hellcreek->reg_lock); + + ret = hellcreek_fdb_get(hellcreek, addr, &entry); + if (ret) { + /* Not found */ + memcpy(entry.mac, addr, sizeof(entry.mac)); + entry.portmask = BIT(port); + + ret = __hellcreek_fdb_add(hellcreek, &entry); + if (ret) { + dev_err(hellcreek->dev, "Failed to add FDB entry!\n"); + goto out; + } + } else { + /* Found */ + ret = __hellcreek_fdb_del(hellcreek, &entry); + if (ret) { + dev_err(hellcreek->dev, "Failed to delete FDB entry!\n"); + goto out; + } + + entry.portmask |= BIT(port); + + ret = __hellcreek_fdb_add(hellcreek, &entry); + if (ret) { + dev_err(hellcreek->dev, "Failed to add FDB entry!\n"); + goto out; + } + } + +out: + mutex_unlock(&hellcreek->reg_lock); + + return ret; +} + +static int hellcreek_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct hellcreek_fdb_entry entry = { 0 }; + struct hellcreek *hellcreek = ds->priv; + int ret; + + dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr); + + mutex_lock(&hellcreek->reg_lock); + + ret = hellcreek_fdb_get(hellcreek, addr, &entry); + if (ret) { + /* Not found */ + dev_err(hellcreek->dev, "FDB entry for deletion not found!\n"); + } else { + /* Found */ + ret = __hellcreek_fdb_del(hellcreek, &entry); + if (ret) { + dev_err(hellcreek->dev, "Failed to delete FDB entry!\n"); + goto out; + } + + entry.portmask &= ~BIT(port); + + if (entry.portmask != 0x00) { + ret = __hellcreek_fdb_add(hellcreek, &entry); + if (ret) { + dev_err(hellcreek->dev, "Failed to add FDB entry!\n"); + goto out; + } + } + } + +out: + mutex_unlock(&hellcreek->reg_lock); + + return ret; +} + +static int hellcreek_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct hellcreek *hellcreek = ds->priv; + u16 entries; + size_t i; + + mutex_lock(&hellcreek->reg_lock); + + /* Set read pointer to zero: The read of HR_FDBMAX (read-only register) 
+ * should reset the internal pointer. But, that doesn't work. The vendor + * suggested a subsequent write as workaround. Same for HR_FDBRDH below. + */ + entries = hellcreek_read(hellcreek, HR_FDBMAX); + hellcreek_write(hellcreek, 0x00, HR_FDBMAX); + + dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries); + + /* Read table */ + for (i = 0; i < hellcreek->fdb_entries; ++i) { + unsigned char null_addr[ETH_ALEN] = { 0 }; + struct hellcreek_fdb_entry entry = { 0 }; + u16 meta, mac; + + meta = hellcreek_read(hellcreek, HR_FDBMDRD); + mac = hellcreek_read(hellcreek, HR_FDBRDL); + entry.mac[5] = mac & 0xff; + entry.mac[4] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDM); + entry.mac[3] = mac & 0xff; + entry.mac[2] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, HR_FDBRDH); + entry.mac[1] = mac & 0xff; + entry.mac[0] = (mac & 0xff00) >> 8; + + /* Force next entry */ + hellcreek_write(hellcreek, 0x00, HR_FDBRDH); + + /* Check valid */ + if (!memcmp(entry.mac, null_addr, ETH_ALEN)) + continue; + + entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >> + HR_FDBMDRD_PORTMASK_SHIFT; + entry.is_static = !!(meta & HR_FDBMDRD_STATIC); + + /* Check port mask */ + if (!(entry.portmask & BIT(port))) + continue; + + cb(entry.mac, 0, entry.is_static, data); + } + + mutex_unlock(&hellcreek->reg_lock); + + return 0; +} + +static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct switchdev_trans *trans) +{ + struct hellcreek *hellcreek = ds->priv; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n", + vlan_filtering ? "Enable" : "Disable", port); + + /* Configure the port to drop packets with unknown VIDs */ + hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering); + + /* Enable VLAN awareness on the switch. This is safe due to + * ds->vlan_filtering_is_global. + */ + hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering); + + return 0; +} + +static int hellcreek_enable_ip_core(struct hellcreek *hellcreek) +{ + int ret; + u16 val; + + mutex_lock(&hellcreek->reg_lock); + + val = hellcreek_read(hellcreek, HR_CTRL_C); + val |= HR_CTRL_C_ENABLE; + hellcreek_write(hellcreek, val, HR_CTRL_C); + ret = hellcreek_wait_until_transitioned(hellcreek); + + mutex_unlock(&hellcreek->reg_lock); + + return ret; +} + +static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek) +{ + struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT]; + struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT]; + u16 ptcfg = 0; + + ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN; + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, CPU_PORT); + hellcreek_write(hellcreek, ptcfg, HR_PTCFG); + + hellcreek_select_port(hellcreek, TUNNEL_PORT); + hellcreek_write(hellcreek, ptcfg, HR_PTCFG); + + cpu_port->ptcfg = ptcfg; + tunnel_port->ptcfg = ptcfg; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek) +{ + int i; + + /* The switch has multiple egress queues per port. The queue is selected + * via the PCP field in the VLAN header. The switch internally deals + * with traffic classes instead of PCP values and this mapping is + * configurable. + * + * The default mapping is (PCP - TC): + * 7 - 7 + * 6 - 6 + * 5 - 5 + * 4 - 4 + * 3 - 3 + * 2 - 1 + * 1 - 0 + * 0 - 2 + * + * Since that default is not an identity mapping, the loop below + * configures one explicitly. 
+ */ + + for (i = 0; i < 8; ++i) { + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_prio(hellcreek, i); + hellcreek_write(hellcreek, + i << HR_PRTCCFG_PCP_TC_MAP_SHIFT, + HR_PRTCCFG); + + mutex_unlock(&hellcreek->reg_lock); + } +} + +static int hellcreek_setup_fdb(struct hellcreek *hellcreek) +{ + static struct hellcreek_fdb_entry ptp = { + /* MAC: 01-1B-19-00-00-00 */ + .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ + .reprio_en = 1, + }; + static struct hellcreek_fdb_entry p2p = { + /* MAC: 01-80-C2-00-00-0E */ + .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e }, + .portmask = 0x03, /* Management ports */ + .age = 0, + .is_obt = 0, + .pass_blocked = 0, + .is_static = 1, + .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */ + .reprio_en = 1, + }; + int ret; + + mutex_lock(&hellcreek->reg_lock); + ret = __hellcreek_fdb_add(hellcreek, &ptp); + if (ret) + goto out; + ret = __hellcreek_fdb_add(hellcreek, &p2p); +out: + mutex_unlock(&hellcreek->reg_lock); + + return ret; +} + +static int hellcreek_setup(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + u16 swcfg = 0; + int ret, i; + + dev_dbg(hellcreek->dev, "Set up the switch\n"); + + /* Let's go */ + ret = hellcreek_enable_ip_core(hellcreek); + if (ret) { + dev_err(hellcreek->dev, "Failed to enable IP core!\n"); + return ret; + } + + /* Enable CPU/Tunnel ports */ + hellcreek_setup_cpu_and_tunnel_port(hellcreek); + + /* Switch config: Keep defaults, enable FDB aging and learning and tag + * each frame from/to cpu port for DSA tagging. Also enable the length + * aware shaping mode. This eliminates the need for Qbv guard bands. + */ + swcfg |= HR_SWCFG_FDBAGE_EN | + HR_SWCFG_FDBLRN_EN | + HR_SWCFG_ALWAYS_OBT | + (HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT); + hellcreek->swcfg = swcfg; + hellcreek_write(hellcreek, swcfg, HR_SWCFG); + + /* Initial vlan membership to reflect port separation */ + for (i = 0; i < ds->num_ports; ++i) { + if (!dsa_is_user_port(ds, i)) + continue; + + hellcreek_setup_vlan_membership(ds, i, true); + } + + /* Configure PCP <-> TC mapping */ + hellcreek_setup_tc_identity_mapping(hellcreek); + + /* Allow VLAN configurations while not filtering which is the default + * for new DSA drivers. + */ + ds->configure_vlan_while_not_filtering = true; + + /* The VLAN awareness is a global switch setting. Therefore, mixed vlan + * filtering setups are not supported. + */ + ds->vlan_filtering_is_global = true; + + /* Intercept _all_ PTP multicast traffic */ + ret = hellcreek_setup_fdb(hellcreek); + if (ret) { + dev_err(hellcreek->dev, + "Failed to insert static PTP FDB entries\n"); + return ret; + } + + return 0; +} + +static void hellcreek_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct hellcreek *hellcreek = ds->priv; + + dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port); + + /* The MAC settings are a hardware configuration option and cannot be + * changed at run time or by strapping. Therefore the attached PHYs + * should be programmed to only advertise settings which are supported + * by the hardware. 
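+ *
+ * (Editorial example, not part of the original patch: on an is_100_mbits
+ * device only 100baseT_Full survives the bitmap_and() below, so gigabit
+ * modes are never advertised.)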
+ */ + if (hellcreek->pdata->is_100_mbits) + phylink_set(mask, 100baseT_Full); + else + phylink_set(mask, 1000baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static int +hellcreek_port_prechangeupper(struct dsa_switch *ds, int port, + struct netdev_notifier_changeupper_info *info) +{ + struct hellcreek *hellcreek = ds->priv; + bool used = true; + int ret = -EBUSY; + u16 vid; + int i; + + dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port); + + /* + * Deny VLAN devices on top of lan ports with the same VLAN ids, because + * it breaks the port separation due to the private VLANs. Example: + * + * lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99 + * and lan1.100 works. + */ + + if (!is_vlan_dev(info->upper_dev)) + return 0; + + vid = vlan_dev_vlan_id(info->upper_dev); + + /* For all ports, check bitmaps */ + mutex_lock(&hellcreek->vlan_lock); + for (i = 0; i < hellcreek->pdata->num_ports; ++i) { + if (!dsa_is_user_port(ds, i)) + continue; + + if (port == i) + continue; + + used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap); + } + + if (used) + goto out; + + /* Update bitmap */ + set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap); + + ret = 0; + +out: + mutex_unlock(&hellcreek->vlan_lock); + + return ret; +} + +static const struct dsa_switch_ops hellcreek_ds_ops = { + .get_ethtool_stats = hellcreek_get_ethtool_stats, + .get_sset_count = hellcreek_get_sset_count, + .get_strings = hellcreek_get_strings, + .get_tag_protocol = hellcreek_get_tag_protocol, + .get_ts_info = hellcreek_get_ts_info, + .phylink_validate = hellcreek_phylink_validate, + .port_bridge_join = hellcreek_port_bridge_join, + .port_bridge_leave = hellcreek_port_bridge_leave, + .port_disable = hellcreek_port_disable, + .port_enable = hellcreek_port_enable, + .port_fdb_add = hellcreek_fdb_add, + .port_fdb_del = hellcreek_fdb_del, + .port_fdb_dump = hellcreek_fdb_dump, + .port_hwtstamp_set = hellcreek_port_hwtstamp_set, + .port_hwtstamp_get = hellcreek_port_hwtstamp_get, + .port_prechangeupper = hellcreek_port_prechangeupper, + .port_rxtstamp = hellcreek_port_rxtstamp, + .port_stp_state_set = hellcreek_port_stp_state_set, + .port_txtstamp = hellcreek_port_txtstamp, + .port_vlan_add = hellcreek_vlan_add, + .port_vlan_del = hellcreek_vlan_del, + .port_vlan_filtering = hellcreek_vlan_filtering, + .port_vlan_prepare = hellcreek_vlan_prepare, + .setup = hellcreek_setup, +}; + +static int hellcreek_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hellcreek *hellcreek; + struct resource *res; + int ret, i; + + hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL); + if (!hellcreek) + return -ENOMEM; + + hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID, + sizeof(*hellcreek->vidmbrcfg), + GFP_KERNEL); + if (!hellcreek->vidmbrcfg) + return -ENOMEM; + + hellcreek->pdata = of_device_get_match_data(dev); + + hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports, + sizeof(*hellcreek->ports), + GFP_KERNEL); + if (!hellcreek->ports) + return -ENOMEM; + + for (i = 0; i < hellcreek->pdata->num_ports; ++i) { + struct hellcreek_port *port = &hellcreek->ports[i]; + + port->counter_values = + devm_kcalloc(dev, + ARRAY_SIZE(hellcreek_counter), + sizeof(*port->counter_values), + GFP_KERNEL); + if (!port->counter_values) + return -ENOMEM; + + port->vlan_dev_bitmap = + devm_kcalloc(dev, + BITS_TO_LONGS(VLAN_N_VID), 
+ sizeof(unsigned long), + GFP_KERNEL); + if (!port->vlan_dev_bitmap) + return -ENOMEM; + + port->hellcreek = hellcreek; + port->port = i; + } + + mutex_init(&hellcreek->reg_lock); + mutex_init(&hellcreek->vlan_lock); + mutex_init(&hellcreek->ptp_lock); + + hellcreek->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn"); + if (!res) { + dev_err(dev, "No memory region provided!\n"); + return -ENODEV; + } + + hellcreek->base = devm_ioremap_resource(dev, res); + if (IS_ERR(hellcreek->base)) { + dev_err(dev, "No memory available!\n"); + return PTR_ERR(hellcreek->base); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp"); + if (!res) { + dev_err(dev, "No PTP memory region provided!\n"); + return -ENODEV; + } + + hellcreek->ptp_base = devm_ioremap_resource(dev, res); + if (IS_ERR(hellcreek->ptp_base)) { + dev_err(dev, "No memory available!\n"); + return PTR_ERR(hellcreek->ptp_base); + } + + ret = hellcreek_detect(hellcreek); + if (ret) { + dev_err(dev, "No (known) chip found!\n"); + return ret; + } + + ret = hellcreek_wait_until_ready(hellcreek); + if (ret) { + dev_err(dev, "Switch didn't become ready!\n"); + return ret; + } + + hellcreek_feature_detect(hellcreek); + + hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL); + if (!hellcreek->ds) + return -ENOMEM; + + hellcreek->ds->dev = dev; + hellcreek->ds->priv = hellcreek; + hellcreek->ds->ops = &hellcreek_ds_ops; + hellcreek->ds->num_ports = hellcreek->pdata->num_ports; + hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES; + + ret = dsa_register_switch(hellcreek->ds); + if (ret) { + dev_err_probe(dev, ret, "Unable to register switch\n"); + return ret; + } + + ret = hellcreek_ptp_setup(hellcreek); + if (ret) { + dev_err(dev, "Failed to setup PTP!\n"); + goto err_ptp_setup; + } + + ret = hellcreek_hwtstamp_setup(hellcreek); + if (ret) { + dev_err(dev, "Failed to setup hardware timestamping!\n"); + goto err_tstamp_setup; + } + + platform_set_drvdata(pdev, hellcreek); + + return 0; + +err_tstamp_setup: + hellcreek_ptp_free(hellcreek); +err_ptp_setup: + dsa_unregister_switch(hellcreek->ds); + + return ret; +} + +static int hellcreek_remove(struct platform_device *pdev) +{ + struct hellcreek *hellcreek = platform_get_drvdata(pdev); + + hellcreek_hwtstamp_free(hellcreek); + hellcreek_ptp_free(hellcreek); + dsa_unregister_switch(hellcreek->ds); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct hellcreek_platform_data de1soc_r1_pdata = { + .num_ports = 4, + .is_100_mbits = 1, + .qbv_support = 1, + .qbv_on_cpu_port = 1, + .qbu_support = 0, + .module_id = 0x4c30, +}; + +static const struct of_device_id hellcreek_of_match[] = { + { + .compatible = "hirschmann,hellcreek-de1soc-r1", + .data = &de1soc_r1_pdata, + }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, hellcreek_of_match); + +static struct platform_driver hellcreek_driver = { + .probe = hellcreek_probe, + .remove = hellcreek_remove, + .driver = { + .name = "hellcreek", + .of_match_table = hellcreek_of_match, + }, +}; +module_platform_driver(hellcreek_driver); + +MODULE_AUTHOR("Kurt Kanzenbach <kurt@linutronix.de>"); +MODULE_DESCRIPTION("Hirschmann Hellcreek driver"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h new file mode 100644 index 000000000000..e81781ebc31c --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -0,0 +1,286 @@ +/* SPDX-License-Identifier: (GPL-2.0 or MIT) */ +/* + * DSA driver for: + * 
Hirschmann Hellcreek TSN switch. + * + * Copyright (C) 2019,2020 Linutronix GmbH + * Author Kurt Kanzenbach <kurt@linutronix.de> + */ + +#ifndef _HELLCREEK_H_ +#define _HELLCREEK_H_ + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/leds.h> +#include <linux/platform_data/hirschmann-hellcreek.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/timecounter.h> +#include <net/dsa.h> + +/* Ports: + * - 0: CPU + * - 1: Tunnel + * - 2: TSN front port 1 + * - 3: TSN front port 2 + * - ... + */ +#define CPU_PORT 0 +#define TUNNEL_PORT 1 + +#define HELLCREEK_VLAN_NO_MEMBER 0x0 +#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1 +#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3 +#define HELLCREEK_NUM_EGRESS_QUEUES 8 + +/* Register definitions */ +#define HR_MODID_C (0 * 2) +#define HR_REL_L_C (1 * 2) +#define HR_REL_H_C (2 * 2) +#define HR_BLD_L_C (3 * 2) +#define HR_BLD_H_C (4 * 2) +#define HR_CTRL_C (5 * 2) +#define HR_CTRL_C_READY BIT(14) +#define HR_CTRL_C_TRANSITION BIT(13) +#define HR_CTRL_C_ENABLE BIT(0) + +#define HR_PSEL (0xa6 * 2) +#define HR_PSEL_PTWSEL_SHIFT 4 +#define HR_PSEL_PTWSEL_MASK GENMASK(5, 4) +#define HR_PSEL_PRTCWSEL_SHIFT 0 +#define HR_PSEL_PRTCWSEL_MASK GENMASK(2, 0) + +#define HR_PTCFG (0xa7 * 2) +#define HR_PTCFG_MLIMIT_EN BIT(13) +#define HR_PTCFG_UMC_FLT BIT(10) +#define HR_PTCFG_UUC_FLT BIT(9) +#define HR_PTCFG_UNTRUST BIT(8) +#define HR_PTCFG_TAG_REQUIRED BIT(7) +#define HR_PTCFG_PPRIO_SHIFT 4 +#define HR_PTCFG_PPRIO_MASK GENMASK(6, 4) +#define HR_PTCFG_INGRESSFLT BIT(3) +#define HR_PTCFG_BLOCKED BIT(2) +#define HR_PTCFG_LEARNING_EN BIT(1) +#define HR_PTCFG_ADMIN_EN BIT(0) + +#define HR_PRTCCFG (0xa8 * 2) +#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0 +#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0) + +#define HR_CSEL (0x8d * 2) +#define HR_CSEL_SHIFT 0 +#define HR_CSEL_MASK GENMASK(7, 0) +#define HR_CRDL (0x8e * 2) +#define HR_CRDH (0x8f * 2) + +#define HR_SWTRC_CFG (0x90 * 2) +#define HR_SWTRC0 (0x91 * 2) +#define HR_SWTRC1 (0x92 * 2) +#define HR_PFREE (0x93 * 2) +#define HR_MFREE (0x94 * 2) + +#define HR_FDBAGE (0x97 * 2) +#define HR_FDBMAX (0x98 * 2) +#define HR_FDBRDL (0x99 * 2) +#define HR_FDBRDM (0x9a * 2) +#define HR_FDBRDH (0x9b * 2) + +#define HR_FDBMDRD (0x9c * 2) +#define HR_FDBMDRD_PORTMASK_SHIFT 0 +#define HR_FDBMDRD_PORTMASK_MASK GENMASK(3, 0) +#define HR_FDBMDRD_AGE_SHIFT 4 +#define HR_FDBMDRD_AGE_MASK GENMASK(7, 4) +#define HR_FDBMDRD_OBT BIT(8) +#define HR_FDBMDRD_PASS_BLOCKED BIT(9) +#define HR_FDBMDRD_STATIC BIT(11) +#define HR_FDBMDRD_REPRIO_TC_SHIFT 12 +#define HR_FDBMDRD_REPRIO_TC_MASK GENMASK(14, 12) +#define HR_FDBMDRD_REPRIO_EN BIT(15) + +#define HR_FDBWDL (0x9d * 2) +#define HR_FDBWDM (0x9e * 2) +#define HR_FDBWDH (0x9f * 2) +#define HR_FDBWRM0 (0xa0 * 2) +#define HR_FDBWRM0_PORTMASK_SHIFT 0 +#define HR_FDBWRM0_PORTMASK_MASK GENMASK(3, 0) +#define HR_FDBWRM0_OBT BIT(8) +#define HR_FDBWRM0_PASS_BLOCKED BIT(9) +#define HR_FDBWRM0_REPRIO_TC_SHIFT 12 +#define HR_FDBWRM0_REPRIO_TC_MASK GENMASK(14, 12) +#define HR_FDBWRM0_REPRIO_EN BIT(15) +#define HR_FDBWRM1 (0xa1 * 2) + +#define HR_FDBWRCMD (0xa2 * 2) +#define HR_FDBWRCMD_FDBDEL BIT(9) + +#define HR_SWCFG (0xa3 * 2) +#define HR_SWCFG_GM_STATEMD BIT(15) +#define HR_SWCFG_LAS_MODE_SHIFT 12 +#define HR_SWCFG_LAS_MODE_MASK GENMASK(13, 12) +#define HR_SWCFG_LAS_OFF (0x00) +#define HR_SWCFG_LAS_ON (0x01) +#define HR_SWCFG_LAS_STATIC (0x10) +#define HR_SWCFG_CT_EN BIT(11) +#define 
HR_SWCFG_VLAN_UNAWARE BIT(10) +#define HR_SWCFG_ALWAYS_OBT BIT(9) +#define HR_SWCFG_FDBAGE_EN BIT(5) +#define HR_SWCFG_FDBLRN_EN BIT(4) + +#define HR_SWSTAT (0xa4 * 2) +#define HR_SWSTAT_FAIL BIT(4) +#define HR_SWSTAT_BUSY BIT(0) + +#define HR_SWCMD (0xa5 * 2) +#define HW_SWCMD_FLUSH BIT(0) + +#define HR_VIDCFG (0xaa * 2) +#define HR_VIDCFG_VID_SHIFT 0 +#define HR_VIDCFG_VID_MASK GENMASK(11, 0) +#define HR_VIDCFG_PVID BIT(12) + +#define HR_VIDMBRCFG (0xab * 2) +#define HR_VIDMBRCFG_P0MBR_SHIFT 0 +#define HR_VIDMBRCFG_P0MBR_MASK GENMASK(1, 0) +#define HR_VIDMBRCFG_P1MBR_SHIFT 2 +#define HR_VIDMBRCFG_P1MBR_MASK GENMASK(3, 2) +#define HR_VIDMBRCFG_P2MBR_SHIFT 4 +#define HR_VIDMBRCFG_P2MBR_MASK GENMASK(5, 4) +#define HR_VIDMBRCFG_P3MBR_SHIFT 6 +#define HR_VIDMBRCFG_P3MBR_MASK GENMASK(7, 6) + +#define HR_FEABITS0 (0xac * 2) +#define HR_FEABITS0_FDBBINS_SHIFT 4 +#define HR_FEABITS0_FDBBINS_MASK GENMASK(7, 4) +#define HR_FEABITS0_PCNT_SHIFT 8 +#define HR_FEABITS0_PCNT_MASK GENMASK(11, 8) +#define HR_FEABITS0_MCNT_SHIFT 12 +#define HR_FEABITS0_MCNT_MASK GENMASK(15, 12) + +#define TR_QTRACK (0xb1 * 2) +#define TR_TGDVER (0xb3 * 2) +#define TR_TGDVER_REV_MIN_MASK GENMASK(7, 0) +#define TR_TGDVER_REV_MIN_SHIFT 0 +#define TR_TGDVER_REV_MAJ_MASK GENMASK(15, 8) +#define TR_TGDVER_REV_MAJ_SHIFT 8 +#define TR_TGDSEL (0xb4 * 2) +#define TR_TGDSEL_TDGSEL_MASK GENMASK(1, 0) +#define TR_TGDSEL_TDGSEL_SHIFT 0 +#define TR_TGDCTRL (0xb5 * 2) +#define TR_TGDCTRL_GATE_EN BIT(0) +#define TR_TGDCTRL_CYC_SNAP BIT(4) +#define TR_TGDCTRL_SNAP_EST BIT(5) +#define TR_TGDCTRL_ADMINGATESTATES_MASK GENMASK(15, 8) +#define TR_TGDCTRL_ADMINGATESTATES_SHIFT 8 +#define TR_TGDSTAT0 (0xb6 * 2) +#define TR_TGDSTAT1 (0xb7 * 2) +#define TR_ESTWRL (0xb8 * 2) +#define TR_ESTWRH (0xb9 * 2) +#define TR_ESTCMD (0xba * 2) +#define TR_ESTCMD_ESTSEC_MASK GENMASK(2, 0) +#define TR_ESTCMD_ESTSEC_SHIFT 0 +#define TR_ESTCMD_ESTARM BIT(4) +#define TR_ESTCMD_ESTSWCFG BIT(5) +#define TR_EETWRL (0xbb * 2) +#define TR_EETWRH (0xbc * 2) +#define TR_EETCMD (0xbd * 2) +#define TR_EETCMD_EETSEC_MASK GENMASK(2, 0) +#define TR_EETCMD_EETSEC_SHIFT 0 +#define TR_EETCMD_EETARM BIT(4) +#define TR_CTWRL (0xbe * 2) +#define TR_CTWRH (0xbf * 2) +#define TR_LCNSL (0xc1 * 2) +#define TR_LCNSH (0xc2 * 2) +#define TR_LCS (0xc3 * 2) +#define TR_GCLDAT (0xc4 * 2) +#define TR_GCLDAT_GCLWRGATES_MASK GENMASK(7, 0) +#define TR_GCLDAT_GCLWRGATES_SHIFT 0 +#define TR_GCLDAT_GCLWRLAST BIT(8) +#define TR_GCLDAT_GCLOVRI BIT(9) +#define TR_GCLTIL (0xc5 * 2) +#define TR_GCLTIH (0xc6 * 2) +#define TR_GCLCMD (0xc7 * 2) +#define TR_GCLCMD_GCLWRADR_MASK GENMASK(7, 0) +#define TR_GCLCMD_GCLWRADR_SHIFT 0 +#define TR_GCLCMD_INIT_GATE_STATES_MASK GENMASK(15, 8) +#define TR_GCLCMD_INIT_GATE_STATES_SHIFT 8 + +struct hellcreek_counter { + u8 offset; + const char *name; +}; + +struct hellcreek; + +/* State flags for hellcreek_port_hwtstamp::state */ +enum { + HELLCREEK_HWTSTAMP_ENABLED, + HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, +}; + +/* A structure to hold hardware timestamping information per port */ +struct hellcreek_port_hwtstamp { + /* Timestamping state */ + unsigned long state; + + /* Resources for receive timestamping */ + struct sk_buff_head rx_queue; /* For synchronization messages */ + + /* Resources for transmit timestamping */ + unsigned long tx_tstamp_start; + struct sk_buff *tx_skb; + + /* Current timestamp configuration */ + struct hwtstamp_config tstamp_config; +}; + +struct hellcreek_port { + struct hellcreek *hellcreek; + unsigned long *vlan_dev_bitmap; + int port; + u16 ptcfg;
/* ptcfg shadow */ + u64 *counter_values; + + /* Per-port timestamping resources */ + struct hellcreek_port_hwtstamp port_hwtstamp; +}; + +struct hellcreek_fdb_entry { + size_t idx; + unsigned char mac[ETH_ALEN]; + u8 portmask; + u8 age; + u8 is_obt; + u8 pass_blocked; + u8 is_static; + u8 reprio_tc; + u8 reprio_en; +}; + +struct hellcreek { + const struct hellcreek_platform_data *pdata; + struct device *dev; + struct dsa_switch *ds; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct hellcreek_port *ports; + struct delayed_work overflow_work; + struct led_classdev led_is_gm; + struct led_classdev led_sync_good; + struct mutex reg_lock; /* Switch IP register lock */ + struct mutex vlan_lock; /* VLAN bitmaps lock */ + struct mutex ptp_lock; /* PTP IP register lock */ + void __iomem *base; + void __iomem *ptp_base; + u16 swcfg; /* swcfg shadow */ + u8 *vidmbrcfg; /* vidmbrcfg shadow */ + u64 seconds; /* PTP seconds */ + u64 last_ts; /* Used for overflow detection */ + u16 status_out; /* ptp.status_out shadow */ + size_t fdb_entries; +}; + +#endif /* _HELLCREEK_H_ */ diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c new file mode 100644 index 000000000000..69dd9a2e8bb6 --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * DSA driver for: + * Hirschmann Hellcreek TSN switch. + * + * Copyright (C) 2019,2020 Hochschule Offenburg + * Copyright (C) 2019,2020 Linutronix GmbH + * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de> + * Kurt Kanzenbach <kurt@linutronix.de> + */ + +#include <linux/ptp_classify.h> + +#include "hellcreek.h" +#include "hellcreek_hwtstamp.h" +#include "hellcreek_ptp.h" + +int hellcreek_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info) +{ + struct hellcreek *hellcreek = ds->priv; + + info->phc_index = hellcreek->ptp_clock ? + ptp_clock_index(hellcreek->ptp_clock) : -1; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + /* enabled tx timestamping */ + info->tx_types = BIT(HWTSTAMP_TX_ON); + + /* L2 & L4 PTPv2 event rx messages are timestamped */ + info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +/* Enabling/disabling TX and RX HW timestamping for different PTP messages is + * not available in the switch. Thus, this function only serves as a check if + * the user requested what is actually available or not + */ +static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port, + struct hwtstamp_config *config) +{ + struct hellcreek_port_hwtstamp *ps = + &hellcreek->ports[port].port_hwtstamp; + bool tx_tstamp_enable = false; + bool rx_tstamp_enable = false; + + /* Interaction with the timestamp hardware is prevented here. 
It is + * enabled when this config function ends successfully + */ + clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state); + + /* Reserved for future extensions */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_ON: + tx_tstamp_enable = true; + break; + + /* TX HW timestamping can't be disabled on the switch */ + case HWTSTAMP_TX_OFF: + config->tx_type = HWTSTAMP_TX_ON; + break; + + default: + return -ERANGE; + } + + switch (config->rx_filter) { + /* RX HW timestamping can't be disabled on the switch */ + case HWTSTAMP_FILTER_NONE: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + rx_tstamp_enable = true; + break; + + /* RX HW timestamping can't be enabled for all messages on the switch */ + case HWTSTAMP_FILTER_ALL: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + + default: + return -ERANGE; + } + + if (!tx_tstamp_enable) + return -ERANGE; + + if (!rx_tstamp_enable) + return -ERANGE; + + /* If this point is reached, then the requested hwtstamp config is + * compatible with the hwtstamp offered by the switch. Therefore, + * enable the interaction with the HW timestamping + */ + set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state); + + return 0; +} + +int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port_hwtstamp *ps; + struct hwtstamp_config config; + int err; + + ps = &hellcreek->ports[port].port_hwtstamp; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + err = hellcreek_set_hwtstamp_config(hellcreek, port, &config); + if (err) + return err; + + /* Save the chosen configuration to be returned later */ + memcpy(&ps->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port, + struct ifreq *ifr) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port_hwtstamp *ps; + struct hwtstamp_config *config; + + ps = &hellcreek->ports[port].port_hwtstamp; + config = &ps->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? + -EFAULT : 0; +} + +/* Returns a pointer to the PTP header if the caller should time stamp, or NULL + * if the caller should not. 
+ */ +static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek, + int port, struct sk_buff *skb, + unsigned int type) +{ + struct hellcreek_port_hwtstamp *ps = + &hellcreek->ports[port].port_hwtstamp; + struct ptp_header *hdr; + + hdr = ptp_parse_header(skb, type); + if (!hdr) + return NULL; + + if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state)) + return NULL; + + return hdr; +} + +static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr) +{ + return be32_to_cpu(hdr->reserved2); +} + +static void hellcreek_clear_reserved_field(struct ptp_header *hdr) +{ + hdr->reserved2 = 0; +} + +static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek, + unsigned int ts_reg) +{ + u16 status; + + status = hellcreek_ptp_read(hellcreek, ts_reg); + + if (status & PR_TS_STATUS_TS_LOST) + dev_err(hellcreek->dev, + "Tx time stamp lost! This should never happen!\n"); + + /* If hwtstamp is not available, this means the previous hwtstamp was + * successfully read, and the one we need is not yet available + */ + return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0; +} + +/* Get nanoseconds timestamp from timestamping unit */ +static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek, + unsigned int ts_reg) +{ + u16 nsl, nsh; + + /* The timestamp is delivered as five sequential 16 bit words; as with + * hellcreek_ptp_clock_read() below, all of them have to be read and + * only the last two carry the nanoseconds high and low word + */ + nsh = hellcreek_ptp_read(hellcreek, ts_reg); + nsh = hellcreek_ptp_read(hellcreek, ts_reg); + nsh = hellcreek_ptp_read(hellcreek, ts_reg); + nsh = hellcreek_ptp_read(hellcreek, ts_reg); + nsl = hellcreek_ptp_read(hellcreek, ts_reg); + + return (u64)nsl | ((u64)nsh << 16); +} + +static int hellcreek_txtstamp_work(struct hellcreek *hellcreek, + struct hellcreek_port_hwtstamp *ps, int port) +{ + struct skb_shared_hwtstamps shhwtstamps; + unsigned int status_reg, data_reg; + struct sk_buff *tmp_skb; + int ts_status; + u64 ns = 0; + + if (!ps->tx_skb) + return 0; + + switch (port) { + case 2: + status_reg = PR_TS_TX_P1_STATUS_C; + data_reg = PR_TS_TX_P1_DATA_C; + break; + case 3: + status_reg = PR_TS_TX_P2_STATUS_C; + data_reg = PR_TS_TX_P2_DATA_C; + break; + default: + dev_err(hellcreek->dev, "Wrong port for timestamping!\n"); + return 0; + } + + ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg); + + /* Not available yet? */ + if (ts_status == 0) { + /* Check whether the operation of reading the tx timestamp has + * exceeded its allowed period + */ + if (time_is_before_jiffies(ps->tx_tstamp_start + + TX_TSTAMP_TIMEOUT)) { + dev_err(hellcreek->dev, + "Timeout while waiting for Tx timestamp!\n"); + goto free_and_clear_skb; + } + + /* The timestamp should be available quickly, so keep polling + * for it with high priority. Restart the work + */ + return 1; + } + + mutex_lock(&hellcreek->ptp_lock); + ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg); + ns += hellcreek_ptp_gettime_seconds(hellcreek, ns); + mutex_unlock(&hellcreek->ptp_lock); + + /* Now we have the timestamp in nanoseconds, store it in the correct + * structure in order to send it to the user + */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + + tmp_skb = ps->tx_skb; + ps->tx_skb = NULL; + + /* skb_complete_tx_timestamp() frees up the client to make another + * timestampable transmit.
We have to be ready for it by clearing the + * ps->tx_skb "flag" beforehand + */ + clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state); + + /* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */ + skb_complete_tx_timestamp(tmp_skb, &shhwtstamps); + + return 0; + +free_and_clear_skb: + dev_kfree_skb_any(ps->tx_skb); + ps->tx_skb = NULL; + clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state); + + return 0; +} + +static void hellcreek_get_rxts(struct hellcreek *hellcreek, + struct hellcreek_port_hwtstamp *ps, + struct sk_buff *skb, struct sk_buff_head *rxq, + int port) +{ + struct skb_shared_hwtstamps *shwt; + struct sk_buff_head received; + unsigned long flags; + + /* The latched timestamp belongs to one of the received frames. */ + __skb_queue_head_init(&received); + + /* Lock & disable interrupts */ + spin_lock_irqsave(&rxq->lock, flags); + + /* Add the reception queue "rxq" to the "received" queue and reinitialize + * "rxq". From now on, we deal with "received" not with "rxq" + */ + skb_queue_splice_tail_init(rxq, &received); + + spin_unlock_irqrestore(&rxq->lock, flags); + + for (; skb; skb = __skb_dequeue(&received)) { + struct ptp_header *hdr; + unsigned int type; + u64 ns; + + /* Get nanoseconds from ptp packet */ + type = SKB_PTP_TYPE(skb); + hdr = ptp_parse_header(skb, type); + ns = hellcreek_get_reserved_field(hdr); + hellcreek_clear_reserved_field(hdr); + + /* Add seconds part */ + mutex_lock(&hellcreek->ptp_lock); + ns += hellcreek_ptp_gettime_seconds(hellcreek, ns); + mutex_unlock(&hellcreek->ptp_lock); + + /* Save time stamp */ + shwt = skb_hwtstamps(skb); + memset(shwt, 0, sizeof(*shwt)); + shwt->hwtstamp = ns_to_ktime(ns); + netif_rx_ni(skb); + } +} + +static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek, + struct hellcreek_port_hwtstamp *ps, + int port) +{ + struct sk_buff *skb; + + skb = skb_dequeue(&ps->rx_queue); + if (skb) + hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port); +} + +long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp) +{ + struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); + struct dsa_switch *ds = hellcreek->ds; + int i, restart = 0; + + for (i = 0; i < ds->num_ports; i++) { + struct hellcreek_port_hwtstamp *ps; + + if (!dsa_is_user_port(ds, i)) + continue; + + ps = &hellcreek->ports[i].port_hwtstamp; + + if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state)) + restart |= hellcreek_txtstamp_work(hellcreek, ps, i); + + hellcreek_rxtstamp_work(hellcreek, ps, i); + } + + return restart ? 1 : -1; +}
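The return value of hellcreek_hwtstamp_work() follows the do_aux_work contract of struct ptp_clock_info: a non-negative return re-arms the kernel's PTP auxiliary worker after that many jiffies, a negative return leaves it idle. Summarized as a comment sketch (restating the code above, not adding driver behavior):

/* do_aux_work() contract as used by hellcreek_hwtstamp_work():
 *
 *   return 1  -> a TX timestamp is still outstanding; poll again in
 *                one jiffy (restart != 0)
 *   return -1 -> nothing pending; stay idle until port_txtstamp() or
 *                port_rxtstamp() re-arm the worker via
 *                ptp_schedule_worker(ptp_clock, 0)
 */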
+ +bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port_hwtstamp *ps; + struct ptp_header *hdr; + + ps = &hellcreek->ports[port].port_hwtstamp; + + /* Check if the driver is expected to do HW timestamping */ + if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP)) + return false; + + /* Make sure the message is a PTP message that needs to be timestamped + * and the interaction with the HW timestamping is enabled. If not, stop + * here + */ + hdr = hellcreek_should_tstamp(hellcreek, port, clone, type); + if (!hdr) + return false; + + if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, + &ps->state)) + return false; + + ps->tx_skb = clone; + + /* Store the number of ticks that have occurred since system start-up + * till this moment + */ + ps->tx_tstamp_start = jiffies; + + ptp_schedule_worker(hellcreek->ptp_clock, 0); + + return true; +} + +bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *skb, unsigned int type) +{ + struct hellcreek *hellcreek = ds->priv; + struct hellcreek_port_hwtstamp *ps; + struct ptp_header *hdr; + + ps = &hellcreek->ports[port].port_hwtstamp; + + /* This check only fails if the user did not initialize hardware + * timestamping beforehand. + */ + if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT) + return false; + + /* Make sure the message is a PTP message that needs to be timestamped + * and the interaction with the HW timestamping is enabled. If not, stop + * here + */ + hdr = hellcreek_should_tstamp(hellcreek, port, skb, type); + if (!hdr) + return false; + + SKB_PTP_TYPE(skb) = type; + + skb_queue_tail(&ps->rx_queue, skb); + + ptp_schedule_worker(hellcreek->ptp_clock, 0); + + return true; +} + +static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port) +{ + struct hellcreek_port_hwtstamp *ps = + &hellcreek->ports[port].port_hwtstamp; + + skb_queue_head_init(&ps->rx_queue); +} + +int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek) +{ + struct dsa_switch *ds = hellcreek->ds; + int i; + + /* Initialize timestamping ports. */ + for (i = 0; i < ds->num_ports; ++i) { + if (!dsa_is_user_port(ds, i)) + continue; + + hellcreek_hwtstamp_port_setup(hellcreek, i); + } + + /* Select the synchronized clock as the source timekeeper for the + * timestamps and enable inline timestamping. + */ + hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK | + PR_SETTINGS_C_RES3TS, + PR_SETTINGS_C); + + return 0; +} + +void hellcreek_hwtstamp_free(struct hellcreek *hellcreek) +{ + /* Nothing to do */ +}
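For reference, the only configuration that hellcreek_set_hwtstamp_config() accepts is TX timestamping on plus PTPv2 event RX filtering; anything else (HWTSTAMP_TX_OFF, HWTSTAMP_FILTER_NONE, HWTSTAMP_FILTER_ALL) fails with -ERANGE because the switch cannot turn its timestamping off. A minimal user-space sketch (the interface name "lan0" and an already-open socket are assumptions):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* 0 on success; cfg is copied back with the accepted values */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}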
 diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h new file mode 100644 index 000000000000..c0745ffa1ebb --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * DSA driver for: + * Hirschmann Hellcreek TSN switch. + * + * Copyright (C) 2019,2020 Hochschule Offenburg + * Copyright (C) 2019,2020 Linutronix GmbH + * Authors: Kurt Kanzenbach <kurt@linutronix.de> + * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de> + */ + +#ifndef _HELLCREEK_HWTSTAMP_H_ +#define _HELLCREEK_HWTSTAMP_H_ + +#include <net/dsa.h> +#include "hellcreek.h" + +/* Timestamp Register */ +#define PR_TS_RX_P1_STATUS_C (0x1d * 2) +#define PR_TS_RX_P1_DATA_C (0x1e * 2) +#define PR_TS_TX_P1_STATUS_C (0x1f * 2) +#define PR_TS_TX_P1_DATA_C (0x20 * 2) +#define PR_TS_RX_P2_STATUS_C (0x25 * 2) +#define PR_TS_RX_P2_DATA_C (0x26 * 2) +#define PR_TS_TX_P2_STATUS_C (0x27 * 2) +#define PR_TS_TX_P2_DATA_C (0x28 * 2) + +#define PR_TS_STATUS_TS_AVAIL BIT(2) +#define PR_TS_STATUS_TS_LOST BIT(3) + +#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb)) + +/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX + * timestamp. When working properly, hardware will produce a timestamp + * within 1ms. Software may encounter delays, so the timeout is set + * accordingly. + */ +#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40) + +int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port, + struct ifreq *ifr); +int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port, + struct ifreq *ifr); + +bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type); +bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port, + struct sk_buff *clone, unsigned int type); + +int hellcreek_get_ts_info(struct dsa_switch *ds, int port, + struct ethtool_ts_info *info); + +long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp); + +int hellcreek_hwtstamp_setup(struct hellcreek *chip); +void hellcreek_hwtstamp_free(struct hellcreek *chip); + +#endif /* _HELLCREEK_HWTSTAMP_H_ */ diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c new file mode 100644 index 000000000000..2572c6087bb5 --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * DSA driver for: + * Hirschmann Hellcreek TSN switch. + * + * Copyright (C) 2019,2020 Hochschule Offenburg + * Copyright (C) 2019,2020 Linutronix GmbH + * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de> + * Kurt Kanzenbach <kurt@linutronix.de> + */ + +#include <linux/ptp_clock_kernel.h> +#include "hellcreek.h" +#include "hellcreek_ptp.h" +#include "hellcreek_hwtstamp.h" + +u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset) +{ + return readw(hellcreek->ptp_base + offset); +} + +void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data, + unsigned int offset) +{ + writew(data, hellcreek->ptp_base + offset); +} + +/* Get nanoseconds from PTP clock */ +static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek) +{ + u16 nsl, nsh; + + /* Take a snapshot */ + hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C); + + /* The time of the day is saved as 96 bits. However, due to hardware + * limitations the seconds are not or only partly kept in the PTP + * core. Currently only three bits for the seconds are available. That's + * why only the nanoseconds are used and the seconds are tracked in + * software. Anyway, due to internal locking all five registers should + * be read. + */ + nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C); + + return (u64)nsl | ((u64)nsh << 16); +} + +static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek) +{ + u64 ns; + + ns = hellcreek_ptp_clock_read(hellcreek); + if (ns < hellcreek->last_ts) + hellcreek->seconds++; + hellcreek->last_ts = ns; + ns += hellcreek->seconds * NSEC_PER_SEC; + + return ns; +}
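The wrap handling in __hellcreek_ptp_gettime() only works if the clock is read often enough; a worked example with invented values, in comment form:

/* Wrap detection, assuming the hardware only delivers 0..999999999 ns:
 *
 *   read 1: ns = 999900000, last_ts = 999800000 -> no wrap
 *   read 2: ns =     200000, last_ts = 999900000 -> ns < last_ts,
 *           seconds++
 *
 * The overflow worker below runs every HELLCREEK_OVERFLOW_PERIOD
 * (HZ / 4), so the clock is read at least four times per second and
 * at most one wrap can occur between two reads.
 */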
 + +/* Retrieve the seconds part, in nanoseconds, for a packet timestamped with + * @ns. There has to be a check whether an overflow occurred between the packet + * arrival and now. If so, use the correct seconds (-1) for calculating the + * packet arrival time. + */ +u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns) +{ + u64 s; + + __hellcreek_ptp_gettime(hellcreek); + if (hellcreek->last_ts > ns) + s = hellcreek->seconds * NSEC_PER_SEC; + else + s = (hellcreek->seconds - 1) * NSEC_PER_SEC; + + return s; +} + +static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); + u64 ns; + + mutex_lock(&hellcreek->ptp_lock); + ns = __hellcreek_ptp_gettime(hellcreek); + mutex_unlock(&hellcreek->ptp_lock); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int hellcreek_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); + u16 secl, nsh, nsl; + + secl = ts->tv_sec & 0xffff; + nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16; + nsl = ts->tv_nsec & 0xffff; + + mutex_lock(&hellcreek->ptp_lock); + + /* Update overflow data structure */ + hellcreek->seconds = ts->tv_sec; + hellcreek->last_ts = ts->tv_nsec; + + /* Set time in clock */ + hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C); + hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C); + hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C); + hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C); + hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C); + + mutex_unlock(&hellcreek->ptp_lock); + + return 0; +} + +static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); + u16 negative = 0, addendh, addendl; + u32 addend; + u64 adj; + + if (scaled_ppm < 0) { + negative = 1; + scaled_ppm = -scaled_ppm; + } + + /* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns + * from the 8 ns (period of the oscillator) every time the accumulator + * register overflows. The value stored in the addend register is added + * to the accumulator register every 8 ns. + * + * addend value = (2^30 * accumulator_overflow_rate) / + * oscillator_frequency + * where: + * + * oscillator_frequency = 125 MHz + * accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8 + */ + adj = scaled_ppm; + adj <<= 11; + addend = (u32)div_u64(adj, 15625); + + addendh = (addend & 0xffff0000) >> 16; + addendl = addend & 0xffff; + + negative = (negative << 15) & 0x8000; + + mutex_lock(&hellcreek->ptp_lock); + + /* Set drift register */ + hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C); + hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C); + hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C); + hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C); + hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C); + + mutex_unlock(&hellcreek->ptp_lock); + + return 0; +}
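A worked example for the addend computation above, using scaled_ppm = 65536 (one ppm, since scaled_ppm is in units of 2^-16 ppm):

/* adj    = 65536 << 11       = 134217728
 * addend = 134217728 / 15625 = 8589
 *
 * 8589/2^30 ns accumulate every 8 ns cycle: at 125 MHz that is about
 * 1000 accumulator overflows per second, i.e. ~1000 ns/s of
 * correction - exactly the requested 1 ppm.
 */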
 + +static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct hellcreek *hellcreek = ptp_to_hellcreek(ptp); + u16 negative = 0, counth, countl; + u32 count_val; + + /* If the offset is larger than the IP core's slow offset resources, + * don't consider slow adjustment. Rather, add the offset directly to + * the current time + */ + if (abs(delta) > MAX_SLOW_OFFSET_ADJ) { + struct timespec64 now, then = ns_to_timespec64(delta); + + hellcreek_ptp_gettime(ptp, &now); + now = timespec64_add(now, then); + hellcreek_ptp_settime(ptp, &now); + + return 0; + } + + if (delta < 0) { + negative = 1; + delta = -delta; + } + + /* 'count_val' does not exceed the maximum register size (2^30) */ + count_val = div_s64(delta, MAX_NS_PER_STEP); + + counth = (count_val & 0xffff0000) >> 16; + countl = count_val & 0xffff; + + negative = (negative << 15) & 0x8000; + + mutex_lock(&hellcreek->ptp_lock); + + /* Set offset write register */ + hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C); + hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C); + hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS, + PR_CLOCK_OFFSET_C); + hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C); + hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C); + + mutex_unlock(&hellcreek->ptp_lock); + + return 0; +}
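For the slow path of hellcreek_ptp_adjtime(), a worked example with delta = +1 ms:

/* count_val = 1000000 / 7 = 142857 steps of MAX_NS_PER_STEP (7 ns)
 *
 * With MIN_CLK_CYCLES_BETWEEN_STEPS = 0 one step is applied per 8 ns
 * clock cycle, so the offset is slewed in roughly 1.14 ms; the 1 ns
 * remainder (1000000 - 142857 * 7) is below the step size and is left
 * to the regular servo. Offsets above MAX_SLOW_OFFSET_ADJ (~7.5 s)
 * take the direct gettime/settime path above instead.
 */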
 + +static int hellcreek_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +static void hellcreek_ptp_overflow_check(struct work_struct *work) +{ + struct delayed_work *dw = to_delayed_work(work); + struct hellcreek *hellcreek; + + hellcreek = dw_overflow_to_hellcreek(dw); + + mutex_lock(&hellcreek->ptp_lock); + __hellcreek_ptp_gettime(hellcreek); + mutex_unlock(&hellcreek->ptp_lock); + + schedule_delayed_work(&hellcreek->overflow_work, + HELLCREEK_OVERFLOW_PERIOD); +} + +static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek, + int led) +{ + return (hellcreek->status_out & led) ? 1 : 0; +} + +static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led, + enum led_brightness b) +{ + mutex_lock(&hellcreek->ptp_lock); + + if (b) + hellcreek->status_out |= led; + else + hellcreek->status_out &= ~led; + + hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT); + + mutex_unlock(&hellcreek->ptp_lock); +} + +static void hellcreek_led_sync_good_set(struct led_classdev *ldev, + enum led_brightness b) +{ + struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good); + + hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b); +} + +static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev) +{ + struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good); + + return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD); +} + +static void hellcreek_led_is_gm_set(struct led_classdev *ldev, + enum led_brightness b) +{ + struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm); + + hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b); +} + +static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev) +{ + struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm); + + return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM); +} + +/* There are two available LEDs internally called sync_good and is_gm. However, + * the user might want to use a different label and specify the default state. + * Take those properties from device tree. + */ +static int hellcreek_led_setup(struct hellcreek *hellcreek) +{ + struct device_node *leds, *led = NULL; + const char *label, *state; + int ret = -EINVAL; + + leds = of_find_node_by_name(hellcreek->dev->of_node, "leds"); + if (!leds) { + dev_err(hellcreek->dev, "No LEDs specified in device tree!\n"); + return ret; + } + + hellcreek->status_out = 0; + + led = of_get_next_available_child(leds, led); + if (!led) { + dev_err(hellcreek->dev, "First LED not specified!\n"); + goto out; + } + + ret = of_property_read_string(led, "label", &label); + hellcreek->led_sync_good.name = ret ? "sync_good" : label; + + ret = of_property_read_string(led, "default-state", &state); + if (!ret) { + if (!strcmp(state, "on")) + hellcreek->led_sync_good.brightness = 1; + else if (!strcmp(state, "off")) + hellcreek->led_sync_good.brightness = 0; + else if (!strcmp(state, "keep")) + hellcreek->led_sync_good.brightness = + hellcreek_get_brightness(hellcreek, + STATUS_OUT_SYNC_GOOD); + } + + hellcreek->led_sync_good.max_brightness = 1; + hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set; + hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get; + + led = of_get_next_available_child(leds, led); + if (!led) { + dev_err(hellcreek->dev, "Second LED not specified!\n"); + ret = -EINVAL; + goto out; + } + + ret = of_property_read_string(led, "label", &label); + hellcreek->led_is_gm.name = ret ? "is_gm" : label; + + ret = of_property_read_string(led, "default-state", &state); + if (!ret) { + if (!strcmp(state, "on")) + hellcreek->led_is_gm.brightness = 1; + else if (!strcmp(state, "off")) + hellcreek->led_is_gm.brightness = 0; + else if (!strcmp(state, "keep")) + hellcreek->led_is_gm.brightness = + hellcreek_get_brightness(hellcreek, + STATUS_OUT_IS_GM); + } + + hellcreek->led_is_gm.max_brightness = 1; + hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set; + hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get; + + /* Set initial state */ + if (hellcreek->led_sync_good.brightness == 1) + hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1); + if (hellcreek->led_is_gm.brightness == 1) + hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1); + + /* Register both LEDs */ + led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good); + led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm); + + ret = 0; + +out: + of_node_put(leds); + + return ret; +} + +int hellcreek_ptp_setup(struct hellcreek *hellcreek) +{ + u16 status; + int ret; + + /* Set up the overflow work */ + INIT_DELAYED_WORK(&hellcreek->overflow_work, + hellcreek_ptp_overflow_check); + + /* Setup PTP clock */ + hellcreek->ptp_clock_info.owner = THIS_MODULE; + snprintf(hellcreek->ptp_clock_info.name, + sizeof(hellcreek->ptp_clock_info.name), + "%s", dev_name(hellcreek->dev)); + + /* IP-Core can add up to 0.5 ns per 8 ns cycle, which means + * accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts + * the nominal frequency by 6.25%) + */ + hellcreek->ptp_clock_info.max_adj = 62500000; + hellcreek->ptp_clock_info.n_alarm = 0; + hellcreek->ptp_clock_info.n_pins = 0; + hellcreek->ptp_clock_info.n_ext_ts = 0; + hellcreek->ptp_clock_info.n_per_out = 0; + hellcreek->ptp_clock_info.pps = 0; + hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine; + hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime; + hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime; + hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime; + hellcreek->ptp_clock_info.enable = 
hellcreek_ptp_enable; + hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work; + + hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info, + hellcreek->dev); + if (IS_ERR(hellcreek->ptp_clock)) + return PTR_ERR(hellcreek->ptp_clock); + + /* Enable the offset correction process, if no offset correction is + * already taking place + */ + status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C); + if (!(status & PR_CLOCK_STATUS_C_OFS_ACT)) + hellcreek_ptp_write(hellcreek, + status | PR_CLOCK_STATUS_C_ENA_OFS, + PR_CLOCK_STATUS_C); + + /* Enable the drift correction process */ + hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT, + PR_CLOCK_STATUS_C); + + /* LED setup */ + ret = hellcreek_led_setup(hellcreek); + if (ret) { + if (hellcreek->ptp_clock) + ptp_clock_unregister(hellcreek->ptp_clock); + return ret; + } + + schedule_delayed_work(&hellcreek->overflow_work, + HELLCREEK_OVERFLOW_PERIOD); + + return 0; +} + +void hellcreek_ptp_free(struct hellcreek *hellcreek) +{ + led_classdev_unregister(&hellcreek->led_is_gm); + led_classdev_unregister(&hellcreek->led_sync_good); + cancel_delayed_work_sync(&hellcreek->overflow_work); + if (hellcreek->ptp_clock) + ptp_clock_unregister(hellcreek->ptp_clock); + hellcreek->ptp_clock = NULL; +} diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.h b/drivers/net/dsa/hirschmann/hellcreek_ptp.h new file mode 100644 index 000000000000..0b51392c7e56 --- /dev/null +++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * DSA driver for: + * Hirschmann Hellcreek TSN switch. + * + * Copyright (C) 2019,2020 Hochschule Offenburg + * Copyright (C) 2019,2020 Linutronix GmbH + * Authors: Kurt Kanzenbach <kurt@linutronix.de> + * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de> + */ + +#ifndef _HELLCREEK_PTP_H_ +#define _HELLCREEK_PTP_H_ + +#include <linux/bitops.h> +#include <linux/ptp_clock_kernel.h> + +#include "hellcreek.h" + +/* Every jump in time is 7 ns */ +#define MAX_NS_PER_STEP 7L + +/* Correct offset at every clock cycle */ +#define MIN_CLK_CYCLES_BETWEEN_STEPS 0 + +/* Maximum available slow offset resources */ +#define MAX_SLOW_OFFSET_ADJ \ + ((unsigned long long)((1 << 30) - 1) * MAX_NS_PER_STEP) + +/* four times a second overflow check */ +#define HELLCREEK_OVERFLOW_PERIOD (HZ / 4) + +/* PTP Register */ +#define PR_SETTINGS_C (0x09 * 2) +#define PR_SETTINGS_C_RES3TS BIT(4) +#define PR_SETTINGS_C_TS_SRC_TK_SHIFT 8 +#define PR_SETTINGS_C_TS_SRC_TK_MASK GENMASK(9, 8) +#define PR_COMMAND_C (0x0a * 2) +#define PR_COMMAND_C_SS BIT(0) + +#define PR_CLOCK_STATUS_C (0x0c * 2) +#define PR_CLOCK_STATUS_C_ENA_DRIFT BIT(12) +#define PR_CLOCK_STATUS_C_OFS_ACT BIT(13) +#define PR_CLOCK_STATUS_C_ENA_OFS BIT(14) + +#define PR_CLOCK_READ_C (0x0d * 2) +#define PR_CLOCK_WRITE_C (0x0e * 2) +#define PR_CLOCK_OFFSET_C (0x0f * 2) +#define PR_CLOCK_DRIFT_C (0x10 * 2) + +#define PR_SS_FREE_DATA_C (0x12 * 2) +#define PR_SS_SYNT_DATA_C (0x14 * 2) +#define PR_SS_SYNC_DATA_C (0x16 * 2) +#define PR_SS_DRAC_DATA_C (0x18 * 2) + +#define STATUS_OUT (0x60 * 2) +#define STATUS_OUT_SYNC_GOOD BIT(0) +#define STATUS_OUT_IS_GM BIT(1) + +int hellcreek_ptp_setup(struct hellcreek *hellcreek); +void hellcreek_ptp_free(struct hellcreek *hellcreek); +u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset); +void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data, + unsigned int offset); +u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns); + +#define 
ptp_to_hellcreek(ptp) \ + container_of(ptp, struct hellcreek, ptp_clock_info) + +#define dw_overflow_to_hellcreek(dw) \ + container_of(dw, struct hellcreek, overflow_work) + +#define led_to_hellcreek(ldev, led) \ + container_of(ldev, struct hellcreek, led) + +#endif /* _HELLCREEK_PTP_H_ */ diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 1e101ab56cea..c973db101b72 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -23,7 +23,7 @@ static const struct { char string[ETH_GSTRING_LEN]; -} mib_names[TOTAL_SWITCH_COUNTER_NUM] = { +} mib_names[] = { { "rx_hi" }, { "rx_undersize" }, { "rx_fragments" }, @@ -125,7 +125,7 @@ static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u8 check; int loop; - ctrl_addr = addr + SWITCH_COUNTER_NUM * port; + ctrl_addr = addr + dev->reg_mib_cnt * port; ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); mutex_lock(&dev->alu_mutex); @@ -156,7 +156,7 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, u8 check; int loop; - addr -= SWITCH_COUNTER_NUM; + addr -= dev->reg_mib_cnt; ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port; ctrl_addr += addr + KS_MIB_TOTAL_RX_0; ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ); @@ -418,8 +418,8 @@ static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr) int i; ksz8795_r_table(dev, TABLE_VLAN, addr, &data); - addr *= 4; - for (i = 0; i < 4; i++) { + addr *= dev->phy_port_cnt; + for (i = 0; i < dev->phy_port_cnt; i++) { dev->vlan_cache[addr + i].table[0] = (u16)data; data >>= VLAN_TABLE_S; } @@ -433,7 +433,7 @@ static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) u64 buf; data = (u16 *)&buf; - addr = vid / 4; + addr = vid / dev->phy_port_cnt; index = vid & 3; ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); *vlan = data[index]; @@ -447,7 +447,7 @@ static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) u64 buf; data = (u16 *)&buf; - addr = vid / 4; + addr = vid / dev->phy_port_cnt; index = vid & 3; ksz8795_r_table(dev, TABLE_VLAN, addr, &buf); data[index] = vlan; @@ -654,9 +654,10 @@ static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds, static void ksz8795_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *buf) { + struct ksz_device *dev = ds->priv; int i; - for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + for (i = 0; i < dev->mib_cnt; i++) { memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, ETH_GSTRING_LEN); } @@ -691,12 +692,12 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: data |= PORT_LEARN_DISABLE; - if (port < SWITCH_PORT_NUM) + if (port < dev->phy_port_cnt) member = 0; break; case BR_STATE_LISTENING: data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - if (port < SWITCH_PORT_NUM && + if (port < dev->phy_port_cnt && p->stp_state == BR_STATE_DISABLED) member = dev->host_mask | p->vid_member; break; @@ -720,7 +721,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; - if (port < SWITCH_PORT_NUM && + if (port < dev->phy_port_cnt && p->stp_state == BR_STATE_DISABLED) member = dev->host_mask | p->vid_member; break; @@ -750,17 +751,17 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) { - u8 learn[TOTAL_PORT_NUM]; + u8 learn[DSA_MAX_PORTS]; int first, 
index, cnt; struct ksz_port *p; - if ((uint)port < TOTAL_PORT_NUM) { + if ((uint)port < dev->port_cnt) { first = port; cnt = port + 1; } else { /* Flush all ports. */ first = 0; - cnt = dev->mib_port_cnt; + cnt = dev->port_cnt; } for (index = first; index < cnt; index++) { p = &dev->ports[index]; @@ -992,8 +993,6 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) u8 remote; int i; - ds->num_ports = dev->port_cnt + 1; - /* Switch marks the maximum frame with extra byte as oversize. */ ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true); ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true); @@ -1005,7 +1004,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) ksz8795_port_setup(dev, dev->cpu_port, true); dev->member = dev->host_mask; - for (i = 0; i < SWITCH_PORT_NUM; i++) { + for (i = 0; i < dev->phy_port_cnt; i++) { p = &dev->ports[i]; /* Initialize to non-zero so that ksz_cfg_port_member() will @@ -1016,7 +1015,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds) ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED); /* Last port may be disabled. */ - if (i == dev->port_cnt) + if (i == dev->phy_port_cnt) break; p->on = 1; p->phy = 1; @@ -1085,7 +1084,7 @@ static int ksz8795_setup(struct dsa_switch *ds) (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100); - for (i = 0; i < VLAN_TABLE_ENTRIES; i++) + for (i = 0; i < (dev->num_vlans / 4); i++) ksz8795_r_vlan_entries(dev, i); /* Setup STP address for STP operation. */ @@ -1150,10 +1149,6 @@ static int ksz8795_switch_detect(struct ksz_device *dev) (id2 != CHIP_ID_94 && id2 != CHIP_ID_95)) return -ENODEV; - dev->mib_port_cnt = TOTAL_PORT_NUM; - dev->phy_port_cnt = SWITCH_PORT_NUM; - dev->port_cnt = SWITCH_PORT_NUM; - if (id2 == CHIP_ID_95) { u8 val; @@ -1162,17 +1157,12 @@ static int ksz8795_switch_detect(struct ksz_device *dev) if (val & PORT_FIBER_MODE) id2 = 0x65; } else if (id2 == CHIP_ID_94) { - dev->port_cnt--; - dev->last_port = dev->port_cnt; id2 = 0x94; } id16 &= ~0xff; id16 |= id2; dev->chip_id = id16; - dev->cpu_port = dev->mib_port_cnt - 1; - dev->host_mask = BIT(dev->cpu_port); - return 0; } @@ -1194,7 +1184,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = { .num_alus = 0, .num_statics = 8, .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 4, /* total physical port count */ + .port_cnt = 5, /* total cpu and user ports */ }, { .chip_id = 0x8794, @@ -1203,7 +1193,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = { .num_alus = 0, .num_statics = 8, .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 3, /* total physical port count */ + .port_cnt = 4, /* total cpu and user ports */ }, { .chip_id = 0x8765, @@ -1212,7 +1202,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = { .num_alus = 0, .num_statics = 8, .cpu_ports = 0x10, /* can be configured as cpu port */ - .port_cnt = 4, /* total physical port count */ + .port_cnt = 5, /* total cpu and user ports */ }, }; @@ -1244,27 +1234,32 @@ static int ksz8795_switch_init(struct ksz_device *dev) dev->port_mask = BIT(dev->port_cnt) - 1; dev->port_mask |= dev->host_mask; - dev->reg_mib_cnt = SWITCH_COUNTER_NUM; - dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; + dev->reg_mib_cnt = KSZ8795_COUNTER_NUM; + dev->mib_cnt = ARRAY_SIZE(mib_names); + + dev->phy_port_cnt = dev->port_cnt - 1; + + dev->cpu_port = dev->port_cnt - 1; + dev->host_mask = BIT(dev->cpu_port); - i = dev->mib_port_cnt; - dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i, + dev->ports = 
devm_kzalloc(dev->dev, + dev->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); if (!dev->ports) return -ENOMEM; - for (i = 0; i < dev->mib_port_cnt; i++) { + for (i = 0; i < dev->port_cnt; i++) { mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = devm_kzalloc(dev->dev, sizeof(u64) * - (TOTAL_SWITCH_COUNTER_NUM + 1), + (dev->mib_cnt + 1), GFP_KERNEL); if (!dev->ports[i].mib.counters) return -ENOMEM; } /* set the real number of ports */ - dev->ds->num_ports = dev->port_cnt + 1; + dev->ds->num_ports = dev->port_cnt; return 0; } diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h index 3a50462df8fa..40372047d40d 100644 --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h @@ -846,16 +846,7 @@ #define KS_PRIO_IN_REG 4 -#define TOTAL_PORT_NUM 5 - -/* Host port can only be last of them. */ -#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1) - #define KSZ8795_COUNTER_NUM 0x20 -#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4) - -#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM -#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM /* Common names used by other drivers */ @@ -998,7 +989,6 @@ #define TAIL_TAG_OVERRIDE BIT(6) #define TAIL_TAG_LOOKUP BIT(7) -#define VLAN_TABLE_ENTRIES (4096 / 4) #define FID_ENTRIES 128 #endif diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c index 8b00f8e6c02f..f98432a3e2b5 100644 --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c @@ -49,6 +49,12 @@ static int ksz8795_spi_probe(struct spi_device *spi) if (spi->dev.platform_data) dev->pdata = spi->dev.platform_data; + /* setup spi */ + spi->mode = SPI_MODE_3; + ret = spi_setup(spi); + if (ret) + return ret; + ret = ksz8795_switch_register(dev); /* Main DSA driver may not be started yet. */ diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index abfd3802bb51..42e647b67abd 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -478,7 +478,7 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S, SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S); - if (port < dev->mib_port_cnt) { + if (port < dev->port_cnt) { /* flush individual port */ ksz_pread8(dev, port, P_STP_CTRL, &data); if (!(data & PORT_LEARN_DISABLE)) @@ -1267,8 +1267,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) struct ksz_port *p; int i; - ds->num_ports = dev->port_cnt; - for (i = 0; i < dev->port_cnt; i++) { if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { phy_interface_t interface; @@ -1319,7 +1317,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) dev->member = dev->host_mask; - for (i = 0; i < dev->mib_port_cnt; i++) { + for (i = 0; i < dev->port_cnt; i++) { if (i == dev->cpu_port) continue; p = &dev->ports[i]; @@ -1446,7 +1444,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev) return ret; /* Number of ports can be reduced depending on chip. */ - dev->mib_port_cnt = TOTAL_PORT_NUM; dev->phy_port_cnt = 5; /* Default capability is gigabit capable. */ @@ -1463,7 +1460,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev) /* Chip does not support gigabit. 
*/ if (data8 & SW_QW_ABLE) dev->features &= ~GBIT_SUPPORT; - dev->mib_port_cnt = 3; dev->phy_port_cnt = 2; } else { dev_info(dev->dev, "Found KSZ9477 or compatible\n"); @@ -1566,12 +1562,12 @@ static int ksz9477_switch_init(struct ksz_device *dev) dev->reg_mib_cnt = SWITCH_COUNTER_NUM; dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; - i = dev->mib_port_cnt; - dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i, + dev->ports = devm_kzalloc(dev->dev, + dev->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); if (!dev->ports) return -ENOMEM; - for (i = 0; i < dev->mib_port_cnt; i++) { + for (i = 0; i < dev->port_cnt; i++) { mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = devm_kzalloc(dev->dev, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index 1142768969c2..15bc11b3cda4 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -48,6 +48,12 @@ static int ksz9477_spi_probe(struct spi_device *spi) if (spi->dev.platform_data) dev->pdata = spi->dev.platform_data; + /* setup spi */ + spi->mode = SPI_MODE_3; + ret = spi_setup(spi); + if (ret) + return ret; + ret = ksz9477_switch_register(dev); /* Main DSA driver may not be started yet. */ diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 0ef854911f21..cf743133b0b9 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -72,7 +72,7 @@ static void ksz_mib_read_work(struct work_struct *work) struct ksz_port *p; int i; - for (i = 0; i < dev->mib_port_cnt; i++) { + for (i = 0; i < dev->port_cnt; i++) { if (dsa_is_unused_port(dev->ds, i)) continue; @@ -103,7 +103,7 @@ void ksz_init_mib_timer(struct ksz_device *dev) INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work); - for (i = 0; i < dev->mib_port_cnt; i++) + for (i = 0; i < dev->port_cnt; i++) dev->dev_ops->port_init_cnt(dev, i); } EXPORT_SYMBOL_GPL(ksz_init_mib_timer); @@ -426,7 +426,9 @@ int ksz_switch_register(struct ksz_device *dev, ret = of_get_phy_mode(dev->dev->of_node, &interface); if (ret == 0) dev->compat_interface = interface; - ports = of_get_child_by_name(dev->dev->of_node, "ports"); + ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); + if (!ports) + ports = of_get_child_by_name(dev->dev->of_node, "ports"); if (ports) for_each_available_child_of_node(ports, port) { if (of_property_read_u32(port, "reg", diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index cf866e48ff66..720f22275c84 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -71,8 +71,6 @@ struct ksz_device { int port_cnt; int reg_mib_cnt; int mib_cnt; - int mib_port_cnt; - int last_port; /* ports after that not used */ phy_interface_t compat_interface; u32 regs_size; bool phy_errata_9477; diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index de7692b763d8..a67cac15a724 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -558,7 +558,7 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface) val |= 0x190000 << RG_COREPLL_SDM_PCW_S; mt7530_write(priv, MT7531_PLLGP_CR0, val); break; - }; + } /* Set feedback divide ratio update signal to high */ val = mt7530_read(priv, MT7531_PLLGP_CR0); @@ -870,6 +870,46 @@ mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) return ARRAY_SIZE(mt7530_mib); } +static int +mt7530_set_ageing_time(struct dsa_switch *ds, unsigned 
int msecs) +{ + struct mt7530_priv *priv = ds->priv; + unsigned int secs = msecs / 1000; + unsigned int tmp_age_count; + unsigned int error = -1; /* i.e. UINT_MAX: worst case, improved below */ + unsigned int age_count; + unsigned int age_unit; + + /* Applied timer is (AGE_CNT + 1) * (AGE_UNIT + 1) seconds */ + if (secs < 1 || secs > (AGE_CNT_MAX + 1) * (AGE_UNIT_MAX + 1)) + return -ERANGE; + + /* Iterate through all possible age_count to find the closest pair */ + for (tmp_age_count = 0; tmp_age_count <= AGE_CNT_MAX; ++tmp_age_count) { + unsigned int tmp_age_unit = secs / (tmp_age_count + 1) - 1; + + if (tmp_age_unit <= AGE_UNIT_MAX) { + unsigned int tmp_error = secs - + (tmp_age_count + 1) * (tmp_age_unit + 1); + + /* found a closer pair */ + if (error > tmp_error) { + error = tmp_error; + age_count = tmp_age_count; + age_unit = tmp_age_unit; + } + + /* found the exact match, so break the loop */ + if (!error) + break; + } + } + + mt7530_write(priv, MT7530_AAC, AGE_CNT(age_count) | AGE_UNIT(age_unit)); + + return 0; +} + static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface) { struct mt7530_priv *priv = ds->priv; @@ -1021,6 +1061,53 @@ mt7530_port_disable(struct dsa_switch *ds, int port) mutex_unlock(&priv->reg_mutex); } +static int +mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct mt7530_priv *priv = ds->priv; + struct mii_bus *bus = priv->bus; + int length; + u32 val; + + /* When a new MTU is set, DSA always sets the CPU port's MTU to the + * largest MTU of the slave ports. Because the switch only has a global + * RX length register, only allowing CPU port here is enough. + */ + if (!dsa_is_cpu_port(ds, port)) + return 0; + + mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); + + val = mt7530_mii_read(priv, MT7530_GMACCR); + val &= ~MAX_RX_PKT_LEN_MASK; + + /* RX length also includes Ethernet header, MTK tag, and FCS length */ + length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN; + if (length <= 1522) { + val |= MAX_RX_PKT_LEN_1522; + } else if (length <= 1536) { + val |= MAX_RX_PKT_LEN_1536; + } else if (length <= 1552) { + val |= MAX_RX_PKT_LEN_1552; + } else { + val &= ~MAX_RX_JUMBO_MASK; + val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024)); + val |= MAX_RX_PKT_LEN_JUMBO; + } + + mt7530_mii_write(priv, MT7530_GMACCR, val); + + mutex_unlock(&bus->mdio_lock); + + return 0; +} + +static int +mt7530_port_max_mtu(struct dsa_switch *ds, int port) +{ + return MT7530_MAX_MTU; +} +
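Worked examples for the two conversions above (standard Ethernet framing values: ETH_HLEN = 14, ETH_FCS_LEN = 4, MTK_HDR_LEN = 4):

/* port_change_mtu():
 *   new_mtu = 1500 -> length = 1500 + 14 + 4 + 4 = 1522
 *                  -> MAX_RX_PKT_LEN_1522
 *   new_mtu = 9000 -> length = 9022 -> jumbo mode with
 *                  MAX_RX_JUMBO(DIV_ROUND_UP(9022, 1024)) = 9,
 *                  i.e. a 9 KiB RX limit
 *
 * set_ageing_time(): 300000 ms -> secs = 300; the search hits an exact
 * pair on its first iteration: age_count = 0, age_unit = 299, since
 * (0 + 1) * (299 + 1) = 300 seconds.
 */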
 static void mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) { @@ -1570,6 +1657,7 @@ mt7530_setup(ds) */ dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent; ds->configure_vlan_while_not_filtering = true; + ds->mtu_enforcement_ingress = true; if (priv->id == ID_MT7530) { regulator_set_voltage(priv->core_pwr, 1000000, 1000000); @@ -1808,6 +1896,7 @@ mt7531_setup(ds) } ds->configure_vlan_while_not_filtering = true; + ds->mtu_enforcement_ingress = true; /* Flush the FDB table */ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); @@ -2517,8 +2606,11 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .phy_write = mt753x_phy_write, .get_ethtool_stats = mt7530_get_ethtool_stats, .get_sset_count = mt7530_get_sset_count, + .set_ageing_time = mt7530_set_ageing_time, .port_enable = mt7530_port_enable, .port_disable = mt7530_port_disable, + .port_change_mtu = mt7530_port_change_mtu, + .port_max_mtu = mt7530_port_max_mtu, .port_stp_state_set = mt7530_stp_state_set, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 9278a8e3d04e..32d8969b3ace 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -11,6 +11,9 @@ #define MT7530_NUM_FDB_RECORDS 2048 #define MT7530_ALL_MEMBERS 0xff +#define MTK_HDR_LEN 4 +#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN) + enum mt753x_id { ID_MT7530 = 0, ID_MT7621 = 1, @@ -158,6 +161,19 @@ enum mt7530_vlan_egress_attr { MT7530_VLAN_EGRESS_STACK = 3, }; +/* Register for address age control */ +#define MT7530_AAC 0xa0 +/* Disable ageing */ +#define AGE_DIS BIT(20) +/* Age count */ +#define AGE_CNT_MASK GENMASK(19, 12) +#define AGE_CNT_MAX 0xff +#define AGE_CNT(x) (AGE_CNT_MASK & ((x) << 12)) +/* Age unit */ +#define AGE_UNIT_MASK GENMASK(11, 0) +#define AGE_UNIT_MAX 0xfff +#define AGE_UNIT(x) (AGE_UNIT_MASK & (x)) + /* Register for port STP state control */ #define MT7530_SSP_P(x) (0x2000 + ((x) * 0x100)) #define FID_PST(x) ((x) & 0x3) @@ -289,6 +305,15 @@ enum mt7530_vlan_port_attr { #define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100) #define MT7531_DIS_CLR BIT(31) +#define MT7530_GMACCR 0x30e0 +#define MAX_RX_JUMBO(x) ((x) << 2) +#define MAX_RX_JUMBO_MASK GENMASK(5, 2) +#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0) +#define MAX_RX_PKT_LEN_1522 0x0 +#define MAX_RX_PKT_LEN_1536 0x1 +#define MAX_RX_PKT_LEN_1552 0x2 +#define MAX_RX_PKT_LEN_JUMBO 0x3 + /* Register for MIB */ #define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100) #define MT7530_MIB_CCR 0x4fe0 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 34cca0a4b31c..eafe6bedc692 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -727,8 +727,8 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); if ((!mv88e6xxx_port_ppu_updates(chip, port) || - mode == MLO_AN_FIXED) && ops->port_set_link) - err = ops->port_set_link(chip, port, LINK_FORCED_DOWN); + mode == MLO_AN_FIXED) && ops->port_sync_link) - err = ops->port_set_link(chip, port, LINK_FORCED_DOWN); + err = ops->port_sync_link(chip, port, mode, false); mv88e6xxx_reg_unlock(chip); if (err) @@ -768,8 +768,8 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, goto error; } - if (ops->port_set_link) - err = ops->port_set_link(chip, port, LINK_FORCED_UP); + if (ops->port_sync_link) + err = ops->port_sync_link(chip, port, mode, true); } error: mv88e6xxx_reg_unlock(chip); @@ -1347,9 +1347,16 @@ static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip) if (err) return err; - err = mv88e6xxx_g1_atu_set_learn2all(chip, true); - if (err) - return err; + /* The chips that have a "learn2all" bit in Global1, ATU + * Control are precisely those whose port registers have a + * Message Port bit in Port Control 1 and hence implement + * ->port_setup_message_port. 
+ */ + if (chip->info->ops->port_setup_message_port) { + err = mv88e6xxx_g1_atu_set_learn2all(chip, true); + if (err) + return err; + } return mv88e6xxx_g1_atu_set_age_time(chip, 300000); } @@ -1442,7 +1449,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port) static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip) { - if (!chip->info->max_vid) + if (!mv88e6xxx_max_vid(chip)) return 0; return mv88e6xxx_g1_vtu_flush(chip); @@ -1484,7 +1491,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) } /* Set every FID bit used by the VLAN entries */ - vlan.vid = chip->info->max_vid; + vlan.vid = mv88e6xxx_max_vid(chip); vlan.valid = false; do { @@ -1496,7 +1503,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap) break; set_bit(vlan.fid, fid_bitmap); - } while (vlan.vid < chip->info->max_vid); + } while (vlan.vid < mv88e6xxx_max_vid(chip)); return 0; } @@ -1587,7 +1594,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, int err; if (switchdev_trans_ph_prepare(trans)) - return chip->info->max_vid ? 0 : -EOPNOTSUPP; + return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP; mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_set_8021q_mode(chip, port, mode); @@ -1603,7 +1610,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (!chip->info->max_vid) + if (!mv88e6xxx_max_vid(chip)) return -EOPNOTSUPP; /* If the requested port doesn't belong to the same bridge as the VLAN @@ -1973,7 +1980,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u8 member; u16 vid; - if (!chip->info->max_vid) + if (!mv88e6xxx_max_vid(chip)) return; if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) @@ -2051,7 +2058,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 pvid, vid; int err = 0; - if (!chip->info->max_vid) + if (!mv88e6xxx_max_vid(chip)) return -EOPNOTSUPP; mv88e6xxx_reg_lock(chip); @@ -2157,7 +2164,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, return err; /* Dump VLANs' Filtering Information Databases */ - vlan.vid = chip->info->max_vid; + vlan.vid = mv88e6xxx_max_vid(chip); vlan.valid = false; do { @@ -2172,7 +2179,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, cb, data); if (err) return err; - } while (vlan.vid < chip->info->max_vid); + } while (vlan.vid < mv88e6xxx_max_vid(chip)); return err; } @@ -2853,6 +2860,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) chip->ds = ds; ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip); + ds->configure_vlan_while_not_filtering = true; mv88e6xxx_reg_lock(chip); @@ -3209,6 +3217,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -3248,6 +3257,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6185_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_set_frame_mode = mv88e6085_port_set_frame_mode, .port_set_egress_floods = mv88e6185_port_set_egress_floods, @@ -3260,6 +3270,9 @@ static const 
struct mv88e6xxx_ops mv88e6095_ops = { .stats_get_strings = mv88e6095_stats_get_strings, .stats_get_stats = mv88e6095_stats_get_stats, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, + .serdes_power = mv88e6185_serdes_power, + .serdes_get_lane = mv88e6185_serdes_get_lane, + .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -3278,6 +3291,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6185_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -3298,6 +3312,12 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .serdes_power = mv88e6185_serdes_power, + .serdes_get_lane = mv88e6185_serdes_get_lane, + .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6097_serdes_irq_enable, + .serdes_irq_status = mv88e6097_serdes_irq_status, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6085_g1_rmu_disable, @@ -3316,6 +3336,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_set_frame_mode = mv88e6085_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3350,6 +3371,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -3391,6 +3413,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, @@ -3442,6 +3465,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -3483,6 +3507,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .phy_read = mv88e6165_phy_read, .phy_write = mv88e6165_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, @@ -3517,6 +3542,7 @@ static const struct mv88e6xxx_ops 
mv88e6171_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -3559,6 +3585,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6352_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -3610,6 +3637,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -3652,6 +3680,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6352_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -3705,6 +3734,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6185_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_set_frame_mode = mv88e6085_port_set_frame_mode, .port_set_egress_floods = mv88e6185_port_set_egress_floods, @@ -3722,6 +3752,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, + .serdes_power = mv88e6185_serdes_power, + .serdes_get_lane = mv88e6185_serdes_get_lane, + .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state, .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, @@ -3742,6 +3775,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390_port_set_speed_duplex, .port_max_speed_mode = mv88e6390_port_max_speed_mode, @@ -3801,6 +3835,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex, .port_max_speed_mode = mv88e6390x_port_max_speed_mode, @@ -3860,6 +3895,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = 
mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390_port_set_speed_duplex, .port_max_speed_mode = mv88e6390_port_max_speed_mode, @@ -3919,6 +3955,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6352_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -3977,6 +4014,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6250_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -4014,6 +4052,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390_port_set_speed_duplex, .port_max_speed_mode = mv88e6390_port_max_speed_mode, @@ -4075,6 +4114,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -4117,6 +4157,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, @@ -4157,6 +4198,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, @@ -4210,6 +4252,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -4250,6 +4293,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -4294,6 +4338,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = 
mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6352_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, @@ -4354,6 +4399,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390_port_set_speed_duplex, .port_max_speed_mode = mv88e6390_port_max_speed_mode, @@ -4417,6 +4463,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6390x_port_set_speed_duplex, .port_max_speed_mode = mv88e6390x_port_max_speed_mode, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 81c244fc0419..3543055bcb51 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -245,6 +245,7 @@ enum mv88e6xxx_region_id { MV88E6XXX_REGION_GLOBAL1 = 0, MV88E6XXX_REGION_GLOBAL2, MV88E6XXX_REGION_ATU, + MV88E6XXX_REGION_VTU, _MV88E6XXX_REGION_MAX, }; @@ -416,6 +417,10 @@ struct mv88e6xxx_ops { */ int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link); + /* Synchronise the port link state with that of the SERDES + */ + int (*port_sync_link)(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup); + #define PAUSE_ON 1 #define PAUSE_OFF 0 @@ -672,6 +677,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip) return chip->info->num_ports; } +static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip) +{ + return chip->info->max_vid; +} + static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip) { return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0); diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index ade04c036fd9..21953d6d484c 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -417,6 +417,92 @@ out: return err; } +/** + * struct mv88e6xxx_devlink_vtu_entry - Devlink VTU entry + * @fid: Global1/2: FID and VLAN policy. + * @sid: Global1/3: SID, unknown filters and learning. + * @op: Global1/5: FID (old chipsets). + * @vid: Global1/6: VID, valid, and page. + * @data: Global1/7-9: Membership data and priority override. + * @resvd: Reserved. Also happens to align the size to 16B. + * + * The VTU entry format varies between chipset generations, the + * descriptions above represent the superset of all possible + * information, not all fields are valid on all devices. Since this is + * a low-level debug interface, copy all data verbatim and defer + * parsing to the consumer. 
+ */ +struct mv88e6xxx_devlink_vtu_entry { + u16 fid; + u16 sid; + u16 op; + u16 vid; + u16 data[3]; + u16 resvd; +}; + +static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct mv88e6xxx_devlink_vtu_entry *table, *entry; + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct mv88e6xxx_chip *chip = ds->priv; + struct mv88e6xxx_vtu_entry vlan; + int err; + + table = kcalloc(mv88e6xxx_max_vid(chip) + 1, + sizeof(struct mv88e6xxx_devlink_vtu_entry), + GFP_KERNEL); + if (!table) + return -ENOMEM; + + entry = table; + vlan.vid = mv88e6xxx_max_vid(chip); + vlan.valid = false; + + mv88e6xxx_reg_lock(chip); + + do { + err = mv88e6xxx_g1_vtu_getnext(chip, &vlan); + if (err) + break; + + if (!vlan.valid) + break; + + err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID, + &entry->fid); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID, + &entry->sid); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP, + &entry->op); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID, + &entry->vid); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1, + &entry->data[0]); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2, + &entry->data[1]); + err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3, + &entry->data[2]); + if (err) + break; + + entry++; + } while (vlan.vid < mv88e6xxx_max_vid(chip)); + + mv88e6xxx_reg_unlock(chip); + + if (err) { + kfree(table); + return err; + } + + *data = (u8 *)table; + return 0; +} + static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port, const struct devlink_port_region_ops *ops, struct netlink_ext_ack *extack, @@ -475,6 +561,12 @@ static struct devlink_region_ops mv88e6xxx_region_atu_ops = { .destructor = kfree, }; +static struct devlink_region_ops mv88e6xxx_region_vtu_ops = { + .name = "vtu", + .snapshot = mv88e6xxx_region_vtu_snapshot, + .destructor = kfree, +}; + static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = { .name = "port", .snapshot = mv88e6xxx_region_port_snapshot, @@ -498,6 +590,10 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = { .ops = &mv88e6xxx_region_atu_ops /* calculated at runtime */ }, + [MV88E6XXX_REGION_VTU] = { + .ops = &mv88e6xxx_region_vtu_ops + /* calculated at runtime */ + }, }; static void @@ -576,9 +672,16 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds, ops = mv88e6xxx_regions[i].ops; size = mv88e6xxx_regions[i].size; - if (i == MV88E6XXX_REGION_ATU) + switch (i) { + case MV88E6XXX_REGION_ATU: size = mv88e6xxx_num_databases(chip) * sizeof(struct mv88e6xxx_devlink_atu_entry); + break; + case MV88E6XXX_REGION_VTU: + size = mv88e6xxx_max_vid(chip) * + sizeof(struct mv88e6xxx_devlink_vtu_entry); + break; + } region = dsa_devlink_region_create(ds, ops, 1, size); if (IS_ERR(region)) diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index e05abe61fa11..80a182c5b98a 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -330,6 +330,8 @@ void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip); int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash); int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash); +int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry); int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip, struct mv88e6xxx_vtu_entry *entry); int 
mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index bac9a8a68e50..40bd67a5c8e9 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -333,7 +333,7 @@ static int mv88e6xxx_g1_atu_move(struct mv88e6xxx_chip *chip, u16 fid, mask = chip->info->atu_move_port_mask; shift = bitmap_weight(&mask, 16); - entry.state = 0xf, /* Full EntryState means Move */ + entry.state = 0xf; /* Full EntryState means Move */ entry.portvec = from_port & mask; entry.portvec |= (to_port & mask) << shift; diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 1048509a849b..66ddf67b8737 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -307,8 +307,8 @@ static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip, return 0; } -static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_entry *entry) { int err; diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 8128dc607cf4..77a5fd1798cd 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -162,6 +162,42 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link) return 0; } +int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup) +{ + const struct mv88e6xxx_ops *ops = chip->info->ops; + int err = 0; + int link; + + if (isup) + link = LINK_FORCED_UP; + else + link = LINK_FORCED_DOWN; + + if (ops->port_set_link) + err = ops->port_set_link(chip, port, link); + + return err; +} + +int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup) +{ + const struct mv88e6xxx_ops *ops = chip->info->ops; + int err = 0; + int link; + + if (mode == MLO_AN_INBAND) + link = LINK_UNFORCED; + else if (isup) + link = LINK_FORCED_UP; + else + link = LINK_FORCED_DOWN; + + if (ops->port_set_link) + err = ops->port_set_link(chip, port, link); + + return err; +} + static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, bool alt_bit, bool force_bit, int duplex) diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 44d76ac973f6..500e1d4896ff 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -298,6 +298,9 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link); +int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup); +int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup); + int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, int speed, int duplex); int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 9c07b4f3d345..3195936dc5be 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -400,14 +400,16 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) { u16 *p = _p; u16 reg; + int err; int i; if (!mv88e6352_port_has_serdes(chip, port)) return; for (i = 0 ; i < 32; i++) { - 
mv88e6352_serdes_read(chip, i, &reg); - p[i] = reg; + err = mv88e6352_serdes_read(chip, i, &reg); + if (!err) + p[i] = reg; } } @@ -428,6 +430,115 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) return lane; } +int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool up) +{ + /* The serdes power can't be controlled on this switch chip but we need + * to supply this function to avoid returning -EOPNOTSUPP in + * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down + */ + return 0; +} + +u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + /* There are no configurable serdes lanes on this switch chip but we + * need to return non-zero so that callers of + * mv88e6xxx_serdes_get_lane() know this is a serdes port. + */ + switch (chip->ports[port].cmode) { + case MV88E6185_PORT_STS_CMODE_SERDES: + case MV88E6185_PORT_STS_CMODE_1000BASE_X: + return 0xff; + default: + return 0; + } +} + +int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + u8 lane, struct phylink_link_state *state) +{ + int err; + u16 status; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); + if (err) + return err; + + state->link = !!(status & MV88E6XXX_PORT_STS_LINK); + + if (state->link) { + state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF; + + switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) { + case MV88E6XXX_PORT_STS_SPEED_1000: + state->speed = SPEED_1000; + break; + case MV88E6XXX_PORT_STS_SPEED_100: + state->speed = SPEED_100; + break; + case MV88E6XXX_PORT_STS_SPEED_10: + state->speed = SPEED_10; + break; + default: + dev_err(chip->dev, "invalid PHY speed\n"); + return -EINVAL; + } + } else { + state->duplex = DUPLEX_UNKNOWN; + state->speed = SPEED_UNKNOWN; + } + + return 0; +} + +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable) +{ + u8 cmode = chip->ports[port].cmode; + + /* The serdes interrupts are enabled in the G2_INT_MASK register.
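
Plugging a sample status word through the decode above makes the mapping concrete; the numeric values assume the usual MV88E6XXX_PORT_STS bit layout (link = BIT(11), duplex = BIT(10), speed field in bits 9:8) and are given for illustration only.

	/*
	 * status = 0x0e00 -> link up, DUPLEX_FULL, speed field 0x0200 -> SPEED_1000
	 * status = 0x0900 -> link up, DUPLEX_HALF, speed field 0x0100 -> SPEED_100
	 * status = 0x0000 -> link down; speed and duplex reported as UNKNOWN
	 */
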
We + * need to return 0 to avoid returning -EOPNOTSUPP in + * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable + */ + switch (cmode) { + case MV88E6185_PORT_STS_CMODE_SERDES: + case MV88E6185_PORT_STS_CMODE_1000BASE_X: + return 0; + } + + return -EOPNOTSUPP; +} + +static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) +{ + u16 status; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status); + if (err) { + dev_err(chip->dev, "can't read port status: %d\n", err); + return; + } + + dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK)); +} + +irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane) +{ + u8 cmode = chip->ports[port].cmode; + + switch (cmode) { + case MV88E6185_PORT_STS_CMODE_SERDES: + case MV88E6185_PORT_STS_CMODE_1000BASE_X: + mv88e6097_serdes_irq_link(chip, port); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) { u8 cmode = chip->ports[port].cmode; @@ -987,6 +1098,7 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) u16 *p = _p; int lane; u16 reg; + int err; int i; lane = mv88e6xxx_serdes_get_lane(chip, port); @@ -994,8 +1106,9 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p) return; for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) { - mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - mv88e6390_serdes_regs[i], &reg); - p[i] = reg; + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + mv88e6390_serdes_regs[i], &reg); + if (!err) + p[i] = reg; } } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 14315f26228a..93822ef9bab8 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -73,6 +73,7 @@ #define MV88E6390_PG_CONTROL 0xf010 #define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0) +u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); @@ -85,6 +86,8 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, u8 lane, unsigned int mode, phy_interface_t interface, const unsigned long *advertise); +int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + u8 lane, struct phylink_link_state *state); int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, u8 lane, struct phylink_link_state *state); int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, @@ -101,14 +104,20 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port); +int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool up); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane, bool on); +int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, + bool enable); int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, bool enable); int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane, bool enable); +irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + u8 lane); irqreturn_t
mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane); irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index c444ef3da3e2..7dc230677b78 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -112,10 +112,32 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port, ocelot_port_bridge_leave(ocelot, port, br); } -/* This callback needs to be present */ static int felix_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { + struct ocelot *ocelot = ds->priv; + u16 vid, flags = vlan->flags; + int err; + + /* Ocelot switches copy frames as-is to the CPU, so the flags: + * egress-untagged or not, pvid or not, make no difference. This + * behavior is already better than what DSA just tries to approximate + * when it installs the VLAN with the same flags on the CPU port. + * Just accept any configuration, and don't let ocelot deny installing + * multiple native VLANs on the NPI port, because the switch doesn't + * look at the port tag settings towards the NPI interface anyway. + */ + if (port == ocelot->npi) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ocelot_vlan_prepare(ocelot, port, vid, + flags & BRIDGE_VLAN_INFO_PVID, + flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (err) + return err; + } + return 0; } @@ -135,9 +157,6 @@ static void felix_vlan_add(struct dsa_switch *ds, int port, u16 vid; int err; - if (dsa_is_cpu_port(ds, port)) - flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ocelot_vlan_add(ocelot, port, vid, flags & BRIDGE_VLAN_INFO_PVID, diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 307466b90489..83d481ef9273 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -384,7 +384,6 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, { struct realtek_smi *smi = ds->priv; u16 vid; - int ret; for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++) if (!smi->ops->is_vlan_valid(smi, vid)) @@ -397,11 +396,7 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port, * FIXME: what's with this 4k business? * Just rtl8366_enable_vlan() seems inconclusive. 
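
Both vlan_prepare implementations above exist because switchdev objects are applied in two phases: every driver may veto during prepare, and only afterwards does the commit phase program hardware. A condensed sketch of the sequence the DSA core runs per VLAN object follows; switchdev_apply_vlan is an illustrative name, not the real dispatch function.

	static int switchdev_apply_vlan(struct dsa_switch *ds, int port,
					const struct switchdev_obj_port_vlan *vlan)
	{
		int err;

		/* phase 1: validate only; nothing has been programmed yet */
		err = ds->ops->port_vlan_prepare(ds, port, vlan);
		if (err)
			return err;

		/* phase 2: commit; at this point a failure can no longer unwind */
		ds->ops->port_vlan_add(ds, port, vlan);
		return 0;
	}
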
*/ - ret = rtl8366_enable_vlan4k(smi, true); - if (ret) - return ret; - - return 0; + return rtl8366_enable_vlan4k(smi, true); } EXPORT_SYMBOL_GPL(rtl8366_vlan_prepare); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index bab3a9bb5e6f..f82ad7419508 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -124,7 +124,7 @@ static void dummy_setup(struct net_device *dev) dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; - dev->features |= NETIF_F_ALL_TSO; + dev->features |= NETIF_F_GSO_SOFTWARE; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; dev->features |= NETIF_F_GSO_ENCAP_ALL; dev->hw_features |= dev->features; diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c index d60a86aa8aa8..9aac7119d382 100644 --- a/drivers/net/ethernet/8390/mac8390.c +++ b/drivers/net/ethernet/8390/mac8390.c @@ -175,7 +175,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) default: return MAC8390_APPLE; } - break; case NUBUS_DRSW_APPLE: switch (fres->dr_hw) { @@ -186,11 +185,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) default: return MAC8390_APPLE; } - break; case NUBUS_DRSW_ASANTE: return MAC8390_ASANTE; - break; case NUBUS_DRSW_TECHWORKS: case NUBUS_DRSW_DAYNA2: @@ -199,11 +196,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) return MAC8390_CABLETRON; else return MAC8390_APPLE; - break; case NUBUS_DRSW_FARALLON: return MAC8390_FARALLON; - break; case NUBUS_DRSW_KINETICS: switch (fres->dr_hw) { @@ -212,7 +207,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) default: return MAC8390_KINETICS; } - break; case NUBUS_DRSW_DAYNA: /* @@ -224,7 +218,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres) return MAC8390_NONE; else return MAC8390_DAYNA; - break; } return MAC8390_NONE; } diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index 1c97e39b478e..e9756d0ea5b8 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -710,7 +710,7 @@ static void ne_block_output(struct net_device *dev, int count, retry: #endif -#ifdef NE8390_RW_BUGFIX +#ifdef NE_RW_BUGFIX /* Handle the read-before-write bug the same way as the Crynwr packet driver -- the NatSemi method doesn't work. Actually this doesn't always work either, but if you have diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index bc6edb3f1af3..d6715008e04d 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -610,7 +610,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count, /* We should already be in page 0, but to be safe... */ outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); -#ifdef NE8390_RW_BUGFIX +#ifdef NE_RW_BUGFIX /* Handle the read-before-write bug the same way as the * Crynwr packet driver -- the NatSemi method doesn't work. 
* Actually this doesn't always work either, but if you have diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 5f8769aa469d..02087d443e73 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -71,7 +71,8 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, dma_addr_t addr) { if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) { - pr_err("DMA address has more bits than the device supports\n"); + netdev_err(ena_dev->net_device, + "DMA address has more bits than the device supports\n"); return -EINVAL; } @@ -83,6 +84,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_sq *sq = &admin_queue->sq; u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); @@ -90,7 +92,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) &sq->dma_addr, GFP_KERNEL); if (!sq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -105,6 +107,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; struct ena_com_admin_cq *cq = &admin_queue->cq; u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); @@ -112,7 +115,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) &cq->dma_addr, GFP_KERNEL); if (!cq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -135,7 +138,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -156,7 +159,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); if (unlikely(!aenq_handlers)) { - pr_err("AENQ handlers pointer is NULL\n"); + netdev_err(ena_dev->net_device, + "AENQ handlers pointer is NULL\n"); return -EINVAL; } @@ -176,18 +180,21 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu u16 command_id, bool capture) { if (unlikely(command_id >= admin_queue->q_depth)) { - pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n", - command_id, admin_queue->q_depth); + netdev_err(admin_queue->ena_dev->net_device, + "Command id is larger than the queue size.
cmd_id: %u queue size %d\n", + command_id, admin_queue->q_depth); return NULL; } if (unlikely(!admin_queue->comp_ctx)) { - pr_err("Completion context is NULL\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Completion context is NULL\n"); return NULL; } if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { - pr_err("Completion context is occupied\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Completion context is occupied\n"); return NULL; } @@ -217,7 +224,8 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu /* In case of queue FULL */ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - pr_debug("Admin queue is full.\n"); + netdev_dbg(admin_queue->ena_dev->net_device, + "Admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } @@ -259,6 +267,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) { + struct ena_com_dev *ena_dev = admin_queue->ena_dev; size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx); struct ena_comp_ctx *comp_ctx; u16 i; @@ -266,7 +275,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); if (unlikely(!admin_queue->comp_ctx)) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -337,7 +346,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, } if (!io_sq->desc_addr.virt_addr) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, + "Memory allocation failed\n"); return -ENOMEM; } } @@ -363,7 +373,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); if (!io_sq->bounce_buf_ctrl.base_buffer) { - pr_err("Bounce buffer memory allocation failed\n"); + netdev_err(ena_dev->net_device, + "Bounce buffer memory allocation failed\n"); return -ENOMEM; } @@ -423,7 +434,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, } if (!io_cq->cdesc_addr.virt_addr) { - pr_err("Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } @@ -444,7 +455,8 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); if (unlikely(!comp_ctx)) { - pr_err("comp_ctx is NULL. Changing the admin queue running state\n"); + netdev_err(admin_queue->ena_dev->net_device, + "comp_ctx is NULL. 
Changing the admin queue running state\n"); admin_queue->running_state = false; return; } @@ -496,10 +508,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu admin_queue->stats.completed_cmd += comp_num; } -static int ena_com_comp_status_to_errno(u8 comp_status) +static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, + u8 comp_status) { if (unlikely(comp_status != 0)) - pr_err("Admin command failed[%u]\n", comp_status); + netdev_err(admin_queue->ena_dev->net_device, + "Admin command failed[%u]\n", comp_status); switch (comp_status) { case ENA_ADMIN_SUCCESS: @@ -546,7 +560,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c break; if (time_is_before_jiffies(timeout)) { - pr_err("Wait for completion (polling) timeout\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.no_completion++; @@ -562,7 +577,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { - pr_err("Command was aborted\n"); + netdev_err(admin_queue->ena_dev->net_device, + "Command was aborted\n"); spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.aborted_cmd++; spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -573,7 +589,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status); - ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); + ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; @@ -615,7 +631,8 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set LLQ configurations: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set LLQ configurations: %d\n", ret); return ret; } @@ -637,8 +654,9 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_info->header_location_ctrl = llq_default_cfg->llq_header_location; } else { - pr_err("Invalid header location control, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid header location control, supported: 0x%x\n", + supported_feat); return -EINVAL; } @@ -652,14 +670,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) { llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY; } else { - pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid desc_stride_ctrl, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_stride_ctrl, supported_feat, - llq_info->desc_stride_ctrl); + netdev_err(ena_dev->net_device, + "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_stride_ctrl, + supported_feat, llq_info->desc_stride_ctrl); } } else { llq_info->desc_stride_ctrl = 0; @@ -680,20 +700,23 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B; llq_info->desc_list_entry_size = 256; } 
else { - pr_err("Invalid entry_size_ctrl, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid entry_size_ctrl, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_ring_entry_size, supported_feat, - llq_info->desc_list_entry_size); + netdev_err(ena_dev->net_device, + "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_ring_entry_size, supported_feat, + llq_info->desc_list_entry_size); } if (unlikely(llq_info->desc_list_entry_size & 0x7)) { /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ - pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size); + netdev_err(ena_dev->net_device, "Illegal entry size %d\n", + llq_info->desc_list_entry_size); return -EINVAL; } @@ -716,14 +739,16 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) { llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8; } else { - pr_err("Invalid descs_num_before_header, supported: 0x%x\n", - supported_feat); + netdev_err(ena_dev->net_device, + "Invalid descs_num_before_header, supported: 0x%x\n", + supported_feat); return -EINVAL; } - pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_num_decs_before_header, - supported_feat, llq_info->descs_num_before_header); + netdev_err(ena_dev->net_device, + "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", + llq_default_cfg->llq_num_decs_before_header, + supported_feat, llq_info->descs_num_before_header); } /* Check for accelerated queue supported */ llq_accel_mode_get = llq_features->accel_mode.u.get; @@ -739,7 +764,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, rc = ena_com_set_llq(ena_dev); if (rc) - pr_err("Cannot set LLQ configuration: %d\n", rc); + netdev_err(ena_dev->net_device, + "Cannot set LLQ configuration: %d\n", rc); return rc; } @@ -766,15 +792,17 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com spin_unlock_irqrestore(&admin_queue->q_lock, flags); if (comp_ctx->status == ENA_CMD_COMPLETED) { - pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", - comp_ctx->cmd_opcode, - admin_queue->auto_polling ? "ON" : "OFF"); + netdev_err(admin_queue->ena_dev->net_device, + "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", + comp_ctx->cmd_opcode, + admin_queue->auto_polling ? "ON" : "OFF"); /* Check if fallback to polling is enabled */ if (admin_queue->auto_polling) admin_queue->polling = true; } else { - pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n", - comp_ctx->cmd_opcode, comp_ctx->status); + netdev_err(admin_queue->ena_dev->net_device, + "The ena device didn't send a completion for the admin cmd %d status %d\n", + comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. 
* This will happen if there is a completion without an interrupt @@ -787,7 +815,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com } } - ret = ena_com_comp_status_to_errno(comp_ctx->comp_status); + ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status); err: comp_ctxt_release(admin_queue, comp_ctx); return ret; @@ -834,15 +862,17 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) } if (unlikely(i == timeout)) { - pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", - mmio_read->seq_num, offset, read_resp->req_id, - read_resp->reg_off); + netdev_err(ena_dev->net_device, + "Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", + mmio_read->seq_num, offset, read_resp->req_id, + read_resp->reg_off); ret = ENA_MMIO_READ_TIMEOUT; goto err; } if (read_resp->reg_off != offset) { - pr_err("Read failure: wrong offset provided\n"); + netdev_err(ena_dev->net_device, + "Read failure: wrong offset provided\n"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; @@ -901,7 +931,8 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - pr_err("Failed to destroy io sq error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to destroy io sq error: %d\n", ret); return ret; } @@ -951,7 +982,8 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, + "Reg read timeout occurred\n"); return -ETIME; } @@ -991,7 +1023,8 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - pr_debug("Feature %d isn't supported\n", feature_id); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + feature_id); return -EOPNOTSUPP; } @@ -1010,7 +1043,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, &get_cmd.control_buffer.address, control_buf_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -1027,8 +1060,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - pr_err("Failed to submit get_feature command %d error: %d\n", - feature_id, ret); + netdev_err(ena_dev->net_device, + "Failed to submit get_feature command %d error: %d\n", + feature_id, ret); return ret; } @@ -1130,9 +1164,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, if ((get_resp.u.ind_table.min_size > log_size) || (get_resp.u.ind_table.max_size < log_size)) { - pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n", - 1 << log_size, 1 << get_resp.u.ind_table.min_size, - 1 << get_resp.u.ind_table.max_size); + netdev_err(ena_dev->net_device, + "Indirect table size doesn't fit. 
requested size: %d while min is:%d and max %d\n", + 1 << log_size, 1 << get_resp.u.ind_table.min_size, + 1 << get_resp.u.ind_table.max_size); return -EINVAL; } @@ -1223,7 +1258,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, &create_cmd.sq_ba, io_sq->desc_addr.phys_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, + "Memory address set failed\n"); return ret; } } @@ -1234,7 +1270,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - pr_err("Failed to create IO SQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to create IO SQ. error: %d\n", ret); return ret; } @@ -1252,7 +1289,8 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, cmd_completion.llq_descriptors_offset); } - pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); + netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", + io_sq->idx, io_sq->q_depth); return ret; } @@ -1286,7 +1324,8 @@ static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; if (unlikely(!intr_delay_resolution)) { - pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); + netdev_err(ena_dev->net_device, + "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } @@ -1321,22 +1360,25 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); if (IS_ERR(comp_ctx)) { - if (comp_ctx == ERR_PTR(-ENODEV)) - pr_debug("Failed to submit command [%ld]\n", - PTR_ERR(comp_ctx)); + ret = PTR_ERR(comp_ctx); + if (ret == -ENODEV) + netdev_dbg(admin_queue->ena_dev->net_device, + "Failed to submit command [%d]\n", ret); else - pr_err("Failed to submit command [%ld]\n", - PTR_ERR(comp_ctx)); + netdev_err(admin_queue->ena_dev->net_device, + "Failed to submit command [%d]\n", ret); - return PTR_ERR(comp_ctx); + return ret; } ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue); if (unlikely(ret)) { if (admin_queue->running_state) - pr_err("Failed to process command. ret = %d\n", ret); + netdev_err(admin_queue->ena_dev->net_device, + "Failed to process command. ret = %d\n", ret); else - pr_debug("Failed to process command. ret = %d\n", ret); + netdev_dbg(admin_queue->ena_dev->net_device, + "Failed to process command. ret = %d\n", ret); } return ret; } @@ -1365,7 +1407,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, &create_cmd.cq_ba, io_cq->cdesc_addr.phys_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -1375,7 +1417,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - pr_err("Failed to create IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to create IO CQ. 
error: %d\n", ret); return ret; } @@ -1394,7 +1437,8 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.numa_node_register_offset); - pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); + netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", + io_cq->idx, io_cq->q_depth); return ret; } @@ -1404,8 +1448,9 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, struct ena_com_io_cq **io_cq) { if (qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Invalid queue number %d but the max is %d\n", qid, - ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Invalid queue number %d but the max is %d\n", qid, + ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1471,7 +1516,8 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - pr_err("Failed to destroy IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to destroy IO CQ. error: %d\n", ret); return ret; } @@ -1513,13 +1559,14 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0); if (ret) { - pr_info("Can't get aenq configuration\n"); + dev_info(ena_dev->dmadev, "Can't get aenq configuration\n"); return ret; } if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { - pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", - get_resp.u.aenq.supported_groups, groups_flag); + netdev_warn(ena_dev->net_device, + "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n", + get_resp.u.aenq.supported_groups, groups_flag); return -EOPNOTSUPP; } @@ -1538,7 +1585,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to config AENQ ret: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to config AENQ ret: %d\n", ret); return ret; } @@ -1546,20 +1594,21 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) int ena_com_get_dma_width(struct ena_com_dev *ena_dev) { u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); - int width; + u32 width; if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >> ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT; - pr_debug("ENA dma width: %d\n", width); + netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width); if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { - pr_err("DMA width illegal value: %d\n", width); + netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", + width); return -EINVAL; } @@ -1583,23 +1632,24 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } - pr_info("ENA device version: %d.%d\n", - (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, - ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); + dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n", + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - pr_info("ENA controller version: %d.%d.%d 
implementation version %d\n", - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> - ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> - ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), - (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> - ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); + dev_info(ena_dev->dmadev, + "ENA controller version: %d.%d.%d implementation version %d\n", + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> + ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT, + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK), + (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >> + ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT); ctrl_ver_masked = (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) | @@ -1608,7 +1658,8 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < MIN_ENA_CTRL_VER) { - pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); + netdev_err(ena_dev->net_device, + "ENA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -1; } @@ -1741,12 +1792,13 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) { - pr_err("Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { - pr_err("Device isn't ready, abort com init\n"); + netdev_err(ena_dev->net_device, + "Device isn't ready, abort com init\n"); return -ENODEV; } @@ -1823,8 +1875,9 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, int ret; if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Qid (%d) is bigger than max num of queues (%d)\n", - ctx->qid, ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Qid (%d) is bigger than max num of queues (%d)\n", + ctx->qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1882,8 +1935,9 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) struct ena_com_io_cq *io_cq; if (qid >= ENA_TOTAL_NUM_QUEUES) { - pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid, - ENA_TOTAL_NUM_QUEUES); + netdev_err(ena_dev->net_device, + "Qid (%d) is bigger than max num of queues (%d)\n", + qid, ENA_TOTAL_NUM_QUEUES); return; } @@ -2035,8 +2089,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) timestamp = (u64)aenq_common->timestamp_low | ((u64)aenq_common->timestamp_high << 32); - pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", - aenq_common->group, aenq_common->syndrome, timestamp); + netdev_dbg(ena_dev->net_device, + "AENQ! 
Group[%x] Syndrome[%x] timestamp: [%llus]\n", + aenq_common->group, aenq_common->syndrome, timestamp); /* Handle specific event*/ handler_cb = ena_com_get_specific_aenq_cb(ena_dev, @@ -2079,19 +2134,20 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) { - pr_err("Reg read32 timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n"); return -ETIME; } if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { - pr_err("Device isn't ready, can't reset device\n"); + netdev_err(ena_dev->net_device, + "Device isn't ready, can't reset device\n"); return -EINVAL; } timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >> ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT; if (timeout == 0) { - pr_err("Invalid timeout value\n"); + netdev_err(ena_dev->net_device, "Invalid timeout value\n"); return -EINVAL; } @@ -2107,7 +2163,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, rc = wait_for_reset_state(ena_dev, timeout, ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); if (rc != 0) { - pr_err("Reset indication didn't turn on\n"); + netdev_err(ena_dev->net_device, + "Reset indication didn't turn on\n"); return rc; } @@ -2115,7 +2172,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); rc = wait_for_reset_state(ena_dev, timeout, 0); if (rc != 0) { - pr_err("Reset indication didn't turn off\n"); + netdev_err(ena_dev->net_device, + "Reset indication didn't turn off\n"); return rc; } @@ -2152,7 +2210,8 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - pr_err("Failed to get stats. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to get stats. error: %d\n", ret); return ret; } @@ -2187,7 +2246,7 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, return ret; } -int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) { struct ena_com_admin_queue *admin_queue; struct ena_admin_set_feat_cmd cmd; @@ -2195,7 +2254,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_MTU); return -EOPNOTSUPP; } @@ -2214,7 +2274,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set mtu %d. error: %d\n", mtu, ret); + netdev_err(ena_dev->net_device, + "Failed to set mtu %d. 
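/* For orientation, the feature setters touched above (MTU, AENQ, reset)
 * all share one shape; a condensed, hedged sketch with the command setup
 * elided (exact casts and fields are in the context lines of the diff):
 */
static int example_set_feature(struct ena_com_dev *ena_dev)
{
	struct ena_admin_set_feat_cmd cmd = {};
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* ... fill cmd's common descriptor and per-feature union ... */
	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set feature. error: %d\n", ret);
	return ret;
}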
error: %d\n", mtu, ret); return ret; } @@ -2228,7 +2289,8 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, ret = ena_com_get_feature(ena_dev, &resp, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); if (unlikely(ret)) { - pr_err("Failed to get offload capabilities %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to get offload capabilities %d\n", ret); return ret; } @@ -2248,8 +2310,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_FUNCTION); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_FUNCTION); return -EOPNOTSUPP; } @@ -2260,8 +2322,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) return ret; if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { - pr_err("Func hash %d isn't supported by device, abort\n", - rss->hash_func); + netdev_err(ena_dev->net_device, + "Func hash %d isn't supported by device, abort\n", + rss->hash_func); return -EOPNOTSUPP; } @@ -2278,7 +2341,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->hash_key_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2290,8 +2353,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) { - pr_err("Failed to set hash function %d. error: %d\n", - rss->hash_func, ret); + netdev_err(ena_dev->net_device, + "Failed to set hash function %d. error: %d\n", + rss->hash_func, ret); return -EINVAL; } @@ -2322,7 +2386,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return rc; if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { - pr_err("Flow hash function %d isn't supported\n", func); + netdev_err(ena_dev->net_device, + "Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } @@ -2330,8 +2395,9 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, case ENA_ADMIN_TOEPLITZ: if (key) { if (key_len != sizeof(hash_key->key)) { - pr_err("key len (%hu) doesn't equal the supported size (%zu)\n", - key_len, sizeof(hash_key->key)); + netdev_err(ena_dev->net_device, + "key len (%hu) doesn't equal the supported size (%zu)\n", + key_len, sizeof(hash_key->key)); return -EINVAL; } memcpy(hash_key->key, key, key_len); @@ -2343,7 +2409,8 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, rss->hash_init_val = init_val; break; default: - pr_err("Invalid hash function (%d)\n", func); + netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n", + func); return -EINVAL; } @@ -2429,8 +2496,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_HASH_INPUT); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_HASH_INPUT); return -EOPNOTSUPP; } @@ -2448,7 +2515,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->hash_ctrl_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } cmd.control_buffer.length = sizeof(*hash_ctrl); @@ -2459,7 +2526,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) (struct 
ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set hash input. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set hash input. error: %d\n", ret); return ret; } @@ -2509,9 +2577,10 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) available_fields = hash_ctrl->selected_fields[i].fields & hash_ctrl->supported_fields[i].fields; if (available_fields != hash_ctrl->selected_fields[i].fields) { - pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", - i, hash_ctrl->supported_fields[i].fields, - hash_ctrl->selected_fields[i].fields); + netdev_err(ena_dev->net_device, + "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", + i, hash_ctrl->supported_fields[i].fields, + hash_ctrl->selected_fields[i].fields); return -EOPNOTSUPP; } } @@ -2535,7 +2604,8 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, int rc; if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { - pr_err("Invalid proto num (%u)\n", proto); + netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", + proto); return -EINVAL; } @@ -2547,8 +2617,9 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, /* Make sure all the fields are supported */ supported_fields = hash_ctrl->supported_fields[proto].fields; if ((hash_fields & supported_fields) != hash_fields) { - pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n", - proto, hash_fields, supported_fields); + netdev_err(ena_dev->net_device, + "Proto %d doesn't support the required fields %x. supports only: %x\n", + proto, hash_fields, supported_fields); } hash_ctrl->selected_fields[proto].fields = hash_fields; @@ -2588,14 +2659,15 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) if (!ena_com_check_supported_feature_id( ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); return -EOPNOTSUPP; } ret = ena_com_ind_tbl_convert_to_device(ena_dev); if (ret) { - pr_err("Failed to convert host indirection table to device table\n"); + netdev_err(ena_dev->net_device, + "Failed to convert host indirection table to device table\n"); return ret; } @@ -2612,7 +2684,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) &cmd.control_buffer.address, rss->rss_ind_tbl_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2626,7 +2698,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set indirect table. error: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set indirect table. 
error: %d\n", ret); return ret; } @@ -2782,7 +2855,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.debug_ba, host_attr->debug_area_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2790,7 +2863,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) &cmd.u.host_attr.os_info_ba, host_attr->host_info_dma_addr); if (unlikely(ret)) { - pr_err("Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } @@ -2803,7 +2876,8 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - pr_err("Failed to set host attributes: %d\n", ret); + netdev_err(ena_dev->net_device, + "Failed to set host attributes: %d\n", ret); return ret; } @@ -2815,12 +2889,14 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev) ENA_ADMIN_INTERRUPT_MODERATION); } -static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, +static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev, + u32 coalesce_usecs, u32 intr_delay_resolution, u32 *intr_moder_interval) { if (!intr_delay_resolution) { - pr_err("Illegal interrupt delay granularity value\n"); + netdev_err(ena_dev->net_device, + "Illegal interrupt delay granularity value\n"); return -EFAULT; } @@ -2832,7 +2908,8 @@ static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, u32 tx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + tx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_tx_interval); } @@ -2840,7 +2917,8 @@ int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_de int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, u32 rx_coalesce_usecs) { - return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, + return ena_com_update_nonadaptive_moderation_interval(ena_dev, + rx_coalesce_usecs, ena_dev->intr_delay_resolution, &ena_dev->intr_moder_rx_interval); } @@ -2856,12 +2934,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == -EOPNOTSUPP) { - pr_debug("Feature %d isn't supported\n", - ENA_ADMIN_INTERRUPT_MODERATION); + netdev_dbg(ena_dev->net_device, + "Feature %d isn't supported\n", + ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { - pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n", - rc); + netdev_err(ena_dev->net_device, + "Failed to get interrupt moderation admin cmd. 
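/* On ena_com_update_nonadaptive_moderation_interval() above: the added
 * ena_dev argument exists only so the error path can log through the
 * netdev. The conversion itself (in the elided body) divides the
 * requested microseconds by the device's delay resolution, e.g. a
 * 64 usec request with a 2 usec resolution is stored as 32 device units
 * (formula inferred from the surrounding context):
 *
 *	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;
 */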
rc: %d\n", + rc); } /* no moderation supported, disable adaptive support */ @@ -2909,7 +2989,8 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); if (unlikely(ena_dev->tx_max_header_size == 0)) { - pr_err("The size of the LLQ entry is smaller than needed\n"); + netdev_err(ena_dev->net_device, + "The size of the LLQ entry is smaller than needed\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 55097750d062..343caf41e709 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -303,6 +303,7 @@ struct ena_com_dev { u8 __iomem *reg_bar; void __iomem *mem_bar; void *dmadev; + struct net_device *net_device; enum ena_admin_placement_policy_type tx_mem_queue_type; u32 tx_max_header_size; @@ -604,7 +605,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, * * @return: 0 on Success and negative value otherwise. */ -int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu); +int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu); /* ena_com_get_offload_settings - Retrieve the device offloads capabilities * @ena_dev: ENA communication layer struct @@ -931,6 +932,26 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq_features, struct ena_llq_configurations *llq_default_config); +/* ena_com_io_sq_to_ena_dev - Extract ena_com_dev using contained field io_sq. + * @io_sq: IO submit queue struct + * + * @return - ena_com_dev struct extracted from io_sq + */ +static inline struct ena_com_dev *ena_com_io_sq_to_ena_dev(struct ena_com_io_sq *io_sq) +{ + return container_of(io_sq, struct ena_com_dev, io_sq_queues[io_sq->qid]); +} + +/* ena_com_io_cq_to_ena_dev - Extract ena_com_dev using contained field io_cq. 
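/* The two accessors added here are plain container_of() arithmetic:
 * io_sq_queues[] and io_cq_queues[] are embedded by value in
 * ena_com_dev, so the enclosing device can be recovered from a queue
 * pointer alone. Typical datapath use (taken from a later hunk in this
 * diff):
 *
 *	netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 *		   "Bounce buffer is NULL\n");
 *
 * This lets the I/O path log through the netdev without adding a
 * back-pointer field to every queue structure.
 */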
+ * @io_cq: IO completion queue struct + * + * @return - ena_com_dev struct extracted from io_cq + */ +static inline struct ena_com_dev *ena_com_io_cq_to_ena_dev(struct ena_com_io_cq *io_cq) +{ + return container_of(io_cq, struct ena_com_dev, io_cq_queues[io_cq->qid]); +} + static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev) { return ena_dev->adaptive_coalescing; } diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 032ab9f20438..c3be751e7379 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -58,13 +58,15 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, if (is_llq_max_tx_burst_exists(io_sq)) { if (unlikely(!io_sq->entries_in_tx_burst_left)) { - pr_err("Error: trying to send more packets than tx burst allows\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Error: trying to send more packets than tx burst allows\n"); return -ENOSPC; } io_sq->entries_in_tx_burst_left--; - pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n", - io_sq->qid, io_sq->entries_in_tx_burst_left); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Decreasing entries_in_tx_burst_left of queue %d to %d\n", + io_sq->qid, io_sq->entries_in_tx_burst_left); } /* Make sure everything was written into the bounce buffer before @@ -102,12 +104,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { - pr_err("Trying to write header larger than llq entry can accommodate\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Trying to write header larger than llq entry can accommodate\n"); return -EFAULT; } if (unlikely(!bounce_buffer)) { - pr_err("Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Bounce buffer is NULL\n"); return -EFAULT; } @@ -125,7 +129,8 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) bounce_buffer = pkt_ctrl->curr_bounce_buf; if (unlikely(!bounce_buffer)) { - pr_err("Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Bounce buffer is NULL\n"); return NULL; } @@ -250,8 +255,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, io_cq->cur_rx_pkt_cdesc_count = 0; io_cq->cur_rx_pkt_cdesc_start_idx = head_masked; - pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n", - io_cq->qid, *first_cdesc_idx, count); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "ENA q_id: %d packets were completed. 
first desc idx %u descs# %d\n", + io_cq->qid, *first_cdesc_idx, count); } else { io_cq->cur_rx_pkt_cdesc_count += count; count = 0; @@ -335,7 +341,8 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, return 0; } -static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, +static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq, + struct ena_com_rx_ctx *ena_rx_ctx, struct ena_eth_io_rx_cdesc_base *cdesc) { ena_rx_ctx->l3_proto = cdesc->status & @@ -357,10 +364,11 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT; - pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", - ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, - ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, - ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", + ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, + ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, + ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); } /*****************************************************************************/ @@ -385,13 +393,15 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, /* num_bufs +1 for potential meta desc */ if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) { - pr_debug("Not enough space in the tx queue\n"); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Not enough space in the tx queue\n"); return -ENOMEM; } if (unlikely(header_len > io_sq->tx_max_header_size)) { - pr_err("Header size is too large %d max header: %d\n", - header_len, io_sq->tx_max_header_size); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Header size is too large %d max header: %d\n", + header_len, io_sq->tx_max_header_size); return -EINVAL; } @@ -405,7 +415,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); if (unlikely(rc)) { - pr_err("Failed to create and store tx meta desc\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Failed to create and store tx meta desc\n"); return rc; } @@ -529,12 +540,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, return 0; } - pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, - nb_hw_desc); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, + nb_hw_desc); if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { - pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, - ena_rx_ctx->max_bufs); + netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, + ena_rx_ctx->max_bufs); return -ENOSPC; } @@ -557,13 +570,15 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; - pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, - io_sq->next_to_comp); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "[%s][QID#%d] Updating SQ head to: %d\n", __func__, + io_sq->qid, io_sq->next_to_comp); /* Get rx flags from the last pkt */ - ena_com_rx_set_flags(ena_rx_ctx, cdesc); + ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc); ena_rx_ctx->descs = nb_hw_desc; + return 0; } @@ -588,11 +603,15 @@ int ena_com_add_single_rx_desc(struct 
ena_com_io_sq *io_sq, desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | ENA_ETH_IO_RX_DESC_LAST_MASK | - (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | - ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK); desc->req_id = req_id; + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", + __func__, io_sq->qid, req_id); + desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32); diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 2c16c218818a..689313ee25a8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -140,8 +140,9 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, llq_info->descs_per_entry); } - pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, - num_descs, num_entries_needed); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Queue: %d num_descs: %d num_entries_needed: %d\n", + io_sq->qid, num_descs, num_entries_needed); return num_entries_needed > io_sq->entries_in_tx_burst_left; } @@ -151,14 +152,16 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst; u16 tail = io_sq->tail; - pr_debug("Write submission queue doorbell for queue: %d tail: %d\n", - io_sq->qid, tail); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Write submission queue doorbell for queue: %d tail: %d\n", + io_sq->qid, tail); writel(tail, io_sq->db_addr); if (is_llq_max_tx_burst_exists(io_sq)) { - pr_debug("Reset available entries in tx burst for queue %d to %d\n", - io_sq->qid, max_entries_in_tx_burst); + netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, + "Reset available entries in tx burst for queue %d to %d\n", + io_sq->qid, max_entries_in_tx_burst); io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst; } @@ -176,8 +179,9 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); if (unlikely(need_update)) { - pr_debug("Write completion queue doorbell for queue %d: head: %d\n", - io_cq->qid, head); + netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Write completion queue doorbell for queue %d: head: %d\n", + io_cq->qid, head); writel(head, io_cq->cq_head_db_reg); io_cq->last_head_update = head; } @@ -240,7 +244,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, *req_id = READ_ONCE(cdesc->req_id); if (unlikely(*req_id >= io_cq->q_depth)) { - pr_err("Invalid req id %d\n", cdesc->req_id); + netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, + "Invalid req id %d\n", cdesc->req_id); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 3b2cd28f962d..d6cc7aa612b7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -3,6 +3,7 @@ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved. 
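/* Back on ena_com_update_dev_comp_head() in the ena_eth_com.h hunk
 * above: the completion-head doorbell is batched. Worked example,
 * assuming ENA_COMP_HEAD_THRESH is 4 (its value is defined elsewhere in
 * that header): with q_depth == 1024, the check
 *
 *	need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
 *
 * fires only once more than 256 completions have accumulated, trading
 * MMIO writes for slightly staler head state on the device side.
 */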
*/ +#include <linux/ethtool.h> #include <linux/pci.h> #include "ena_netdev.h" @@ -94,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(xdp_pass), ENA_STAT_RX_ENTRY(xdp_tx), ENA_STAT_RX_ENTRY(xdp_invalid), + ENA_STAT_RX_ENTRY(xdp_redirect), }; static const struct ena_stats ena_stats_ena_com_strings[] = { @@ -838,7 +840,7 @@ static int ena_set_channels(struct net_device *netdev, /* The check for max value is already done in ethtool */ if (count < ENA_MIN_NUM_IO_QUEUES || (ena_xdp_present(adapter) && - !ena_xdp_legal_queue_count(adapter, channels->combined_count))) + !ena_xdp_legal_queue_count(adapter, count))) return -EINVAL; return ena_update_queue_count(adapter, count); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index df1884d57d1a..06596fa1f9fe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -29,6 +29,8 @@ MODULE_LICENSE("GPL"); /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5 * HZ) +#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) + #define ENA_NAPI_BUDGET 64 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ @@ -78,6 +80,15 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); +/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ +static void ena_increase_stat(u64 *statp, u64 cnt, + struct u64_stats_sync *syncp) +{ + u64_stats_update_begin(syncp); + (*statp) += cnt; + u64_stats_update_end(syncp); +} + static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ena_adapter *adapter = netdev_priv(dev); @@ -90,9 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) return; adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.tx_timeout++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); netif_err(adapter, tx_err, dev, "Transmit time out\n"); } @@ -152,9 +161,8 @@ static int ena_xmit_common(struct net_device *dev, if (unlikely(rc)) { netif_err(adapter, tx_queued, dev, "Failed to prepare tx bufs\n"); - u64_stats_update_begin(&ring->syncp); - ring->tx_stats.prepare_ctx_err++; - u64_stats_update_end(&ring->syncp); + ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, + &ring->syncp); if (rc != -ENOMEM) { adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE; @@ -225,18 +233,18 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget) return ret; } -static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, - struct ena_tx_buffer *tx_info, - struct xdp_buff *xdp, - void **push_hdr, - u32 *push_len) +static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, + struct ena_tx_buffer *tx_info, + struct xdp_frame *xdpf, + void **push_hdr, + u32 *push_len) { struct ena_adapter *adapter = xdp_ring->adapter; struct ena_com_buf *ena_buf; dma_addr_t dma = 0; u32 size; - tx_info->xdpf = xdp_convert_buff_to_frame(xdp); + tx_info->xdpf = xdpf; size = tx_info->xdpf->len; ena_buf = tx_info->bufs; @@ -262,9 +270,8 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, return 0; error_report_dma_error: - u64_stats_update_begin(&xdp_ring->syncp); - xdp_ring->tx_stats.dma_mapping_err++; - 
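/* ena_increase_stat(), added above, centralizes the sequence that the
 * following hunks delete one call site at a time. The seqlock matters
 * only on 32-bit kernels, where a u64 counter cannot be read atomically;
 * on 64-bit builds the begin/end pair compiles away. One call site,
 * before and after:
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->tx_stats.doorbells++;
 *	u64_stats_update_end(&ring->syncp);
 *
 *	ena_increase_stat(&ring->tx_stats.doorbells, 1, &ring->syncp);
 */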
u64_stats_update_end(&xdp_ring->syncp); + ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1, + &xdp_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n"); xdp_return_frame_rx_napi(tx_info->xdpf); @@ -274,29 +281,24 @@ error_report_dma_error: return -EINVAL; } -static int ena_xdp_xmit_buff(struct net_device *dev, - struct xdp_buff *xdp, - int qid, - struct ena_rx_buffer *rx_info) +static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring, + struct net_device *dev, + struct xdp_frame *xdpf, + int flags) { - struct ena_adapter *adapter = netdev_priv(dev); struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_tx_buffer *tx_info; - struct ena_ring *xdp_ring; u16 next_to_use, req_id; - int rc; void *push_hdr; u32 push_len; + int rc; - xdp_ring = &adapter->tx_ring[qid]; next_to_use = xdp_ring->next_to_use; req_id = xdp_ring->free_ids[next_to_use]; tx_info = &xdp_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0; - page_ref_inc(rx_info->page); - tx_info->xdp_rx_page = rx_info->page; - rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len); + rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len); if (unlikely(rc)) goto error_drop_packet; @@ -311,34 +313,82 @@ static int ena_xdp_xmit_buff(struct net_device *dev, tx_info, &ena_tx_ctx, next_to_use, - xdp->data_end - xdp->data); + xdpf->len); if (rc) goto error_unmap_dma; /* trigger the dma engine. ena_com_write_sq_doorbell() * has a mb */ - ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); - u64_stats_update_begin(&xdp_ring->syncp); - xdp_ring->tx_stats.doorbells++; - u64_stats_update_end(&xdp_ring->syncp); + if (flags & XDP_XMIT_FLUSH) { + ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); + ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, + &xdp_ring->syncp); + } - return NETDEV_TX_OK; + return rc; error_unmap_dma: ena_unmap_tx_buff(xdp_ring, tx_info); tx_info->xdpf = NULL; error_drop_packet: - __free_page(tx_info->xdp_rx_page); - return NETDEV_TX_OK; + xdp_return_frame(xdpf); + return rc; } -static int ena_xdp_execute(struct ena_ring *rx_ring, - struct xdp_buff *xdp, - struct ena_rx_buffer *rx_info) +static int ena_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct ena_adapter *adapter = netdev_priv(dev); + int qid, i, err, drops = 0; + struct ena_ring *xdp_ring; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return -ENETDOWN; + + /* We assume that all rings have the same XDP program */ + if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog)) + return -ENXIO; + + qid = smp_processor_id() % adapter->xdp_num_queues; + qid += adapter->xdp_first_ring; + xdp_ring = &adapter->tx_ring[qid]; + + /* Other CPU ids might try to send through this queue */ + spin_lock(&xdp_ring->xdp_tx_lock); + + for (i = 0; i < n; i++) { + err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0); + /* The descriptor is freed by ena_xdp_xmit_frame in case + * of an error. 
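/* Notes on ena_xdp_xmit() above (the new .ndo_xdp_xmit hook): the
 * transmit ring is picked as xdp_first_ring + (smp_processor_id() %
 * xdp_num_queues), so e.g. with 4 XDP rings starting at index 8, CPU 10
 * lands on ring 10 (numbers illustrative). Because several CPUs can
 * hash onto one ring, xdp_tx_lock serializes them; the doorbell is
 * deferred to a single write when XDP_XMIT_FLUSH is set, rather than
 * rung once per frame.
 */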
+ */ + if (err) + drops++; + } + + /* Ring doorbell to make device aware of the packets */ + if (flags & XDP_XMIT_FLUSH) { + ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); + ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, + &xdp_ring->syncp); + } + + spin_unlock(&xdp_ring->xdp_tx_lock); + + /* Return number of packets sent */ + return n - drops; +} + +static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) { struct bpf_prog *xdp_prog; + struct ena_ring *xdp_ring; u32 verdict = XDP_PASS; + struct xdp_frame *xdpf; u64 *xdp_stat; + int qid; rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); @@ -348,28 +398,49 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, verdict = bpf_prog_run_xdp(xdp_prog, xdp); - if (verdict == XDP_TX) { - ena_xdp_xmit_buff(rx_ring->netdev, - xdp, - rx_ring->qid + rx_ring->adapter->num_io_queues, - rx_info); + switch (verdict) { + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + break; + } + + /* Find xmit queue */ + qid = rx_ring->qid + rx_ring->adapter->num_io_queues; + xdp_ring = &rx_ring->adapter->tx_ring[qid]; + + /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ + spin_lock(&xdp_ring->xdp_tx_lock); + + ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH); + spin_unlock(&xdp_ring->xdp_tx_lock); xdp_stat = &rx_ring->rx_stats.xdp_tx; - } else if (unlikely(verdict == XDP_ABORTED)) { + break; + case XDP_REDIRECT: + if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) { + xdp_stat = &rx_ring->rx_stats.xdp_redirect; + break; + } + fallthrough; + case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; - } else if (unlikely(verdict == XDP_DROP)) { + break; + case XDP_DROP: xdp_stat = &rx_ring->rx_stats.xdp_drop; - } else if (unlikely(verdict == XDP_PASS)) { + break; + case XDP_PASS: xdp_stat = &rx_ring->rx_stats.xdp_pass; - } else { + break; + default: bpf_warn_invalid_xdp_action(verdict); xdp_stat = &rx_ring->rx_stats.xdp_invalid; } - u64_stats_update_begin(&rx_ring->syncp); - (*xdp_stat)++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(xdp_stat, 1, &rx_ring->syncp); out: rcu_read_unlock(); @@ -416,7 +487,7 @@ static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring) { int rc; - rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid); + rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0); if (rc) { netif_err(rx_ring->adapter, ifup, rx_ring->netdev, @@ -638,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter, txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); txr->disable_meta_caching = adapter->disable_meta_caching; + spin_lock_init(&txr->xdp_tx_lock); /* Don't init RX queues for xdp queues */ if (!ENA_IS_XDP_INDEX(adapter, i)) { @@ -922,9 +994,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, page = alloc_page(gfp); if (unlikely(!page)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.page_alloc_fail++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, + &rx_ring->syncp); return -ENOMEM; } @@ -934,9 +1005,8 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { - 
u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.dma_mapping_err++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, + &rx_ring->syncp); __free_page(page); return -EIO; @@ -952,11 +1022,20 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, return 0; } +static void ena_unmap_rx_buff(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info) +{ + struct ena_com_buf *ena_buf = &rx_info->ena_buf; + + dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, + ENA_PAGE_SIZE, + DMA_BIDIRECTIONAL); +} + static void ena_free_rx_page(struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { struct page *page = rx_info->page; - struct ena_com_buf *ena_buf = &rx_info->ena_buf; if (unlikely(!page)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, @@ -964,9 +1043,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, return; } - dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, - ENA_PAGE_SIZE, - DMA_BIDIRECTIONAL); + ena_unmap_rx_buff(rx_ring, rx_info); __free_page(page); rx_info->page = NULL; @@ -1009,9 +1086,8 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) } if (unlikely(i < num)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.refil_partial++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, + &rx_ring->syncp); netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Refilled rx qid %d with only %d buffers (from %d)\n", rx_ring->qid, i, num); @@ -1187,9 +1263,7 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id, "Invalid req_id: %hu\n", req_id); - u64_stats_update_begin(&ring->syncp); - ring->tx_stats.bad_req_id++; - u64_stats_update_end(&ring->syncp); + ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); /* Trigger device reset */ ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; @@ -1300,9 +1374,8 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) if (netif_tx_queue_stopped(txq) && above_thresh && test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { netif_tx_wake_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_wakeup++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, + &tx_ring->syncp); } __netif_tx_unlock(txq); } @@ -1321,9 +1394,8 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) rx_ring->rx_copybreak); if (unlikely(!skb)) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.skb_alloc_fail++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "Failed to allocate skb. 
frags: %d\n", frags); return NULL; } @@ -1395,9 +1467,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, return NULL; do { - dma_unmap_page(rx_ring->dev, - dma_unmap_addr(&rx_info->ena_buf, paddr), - ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); + ena_unmap_rx_buff(rx_ring, rx_info); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, rx_info->page_offset, len, ENA_PAGE_SIZE); @@ -1451,9 +1521,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l3_csum_err))) { /* ipv4 checksum error */ skb->ip_summed = CHECKSUM_NONE; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_csum++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX IPv4 header checksum error\n"); return; @@ -1464,9 +1533,8 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { if (unlikely(ena_rx_ctx->l4_csum_err)) { /* TCP/UDP checksum error */ - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_csum++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1, + &rx_ring->syncp); netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, "RX L4 checksum error\n"); skb->ip_summed = CHECKSUM_NONE; @@ -1475,13 +1543,11 @@ static void ena_rx_checksum(struct ena_ring *rx_ring, if (likely(ena_rx_ctx->l4_csum_checked)) { skb->ip_summed = CHECKSUM_UNNECESSARY; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.csum_good++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, + &rx_ring->syncp); } else { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.csum_unchecked++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, + &rx_ring->syncp); skb->ip_summed = CHECKSUM_NONE; } } else { @@ -1529,7 +1595,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) return XDP_DROP; - ret = ena_xdp_execute(rx_ring, xdp, rx_info); + ret = ena_xdp_execute(rx_ring, xdp); /* The xdp program might expand the headers */ if (ret == XDP_PASS) { @@ -1559,6 +1625,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, struct sk_buff *skb; int refill_required; struct xdp_buff xdp; + int xdp_flags = 0; int total_len = 0; int xdp_verdict; int rc = 0; @@ -1606,22 +1673,25 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, &next_to_clean); if (unlikely(!skb)) { - /* The page might not actually be freed here since the - * page reference count is incremented in - * ena_xdp_xmit_buff(), and it will be decreased only - * when send completion was received from the device - */ - if (xdp_verdict == XDP_TX) - ena_free_rx_page(rx_ring, - &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]); for (i = 0; i < ena_rx_ctx.descs; i++) { - rx_ring->free_ids[next_to_clean] = - rx_ring->ena_bufs[i].req_id; + int req_id = rx_ring->ena_bufs[i].req_id; + + rx_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_RX_RING_IDX_NEXT(next_to_clean, rx_ring->ring_size); + + /* Packet was passed for transmission, so unmap it + * from the RX side. 
+ */ + if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) { + ena_unmap_rx_buff(rx_ring, + &rx_ring->rx_buffer_info[req_id]); + rx_ring->rx_buffer_info[req_id].page = NULL; + } } if (xdp_verdict != XDP_PASS) { + xdp_flags |= xdp_verdict; res_budget--; continue; } @@ -1667,20 +1737,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_refill_rx_bufs(rx_ring, refill_required); } + if (xdp_flags & XDP_REDIRECT) + xdp_do_flush_map(); + return work_done; error: adapter = netdev_priv(rx_ring->netdev); if (rc == -ENOSPC) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_desc_num++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, + &rx_ring->syncp); adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; } else { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.bad_req_id++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, + &rx_ring->syncp); adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; } @@ -1741,9 +1812,8 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring, tx_ring->smoothed_interval, true); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.unmask_interrupt++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, + &tx_ring->syncp); /* It is a shared MSI-X. * Tx and Rx CQ have pointer to it. @@ -1823,7 +1893,7 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) tx_pkts++; total_done += tx_info->tx_descs; - __free_page(tx_info->xdp_rx_page); + xdp_return_frame(xdpf); xdp_ring->free_ids[next_to_clean] = req_id; next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, xdp_ring->ring_size); @@ -2550,9 +2620,8 @@ static int ena_up(struct ena_adapter *adapter) if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) netif_carrier_on(adapter->netdev); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.interface_up++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.interface_up, 1, + &adapter->syncp); set_bit(ENA_FLAG_DEV_UP, &adapter->flags); @@ -2590,9 +2659,8 @@ static void ena_down(struct ena_adapter *adapter) clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.interface_down++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.interface_down, 1, + &adapter->syncp); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -2820,15 +2888,12 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, (header_len < tx_ring->tx_max_header_size)) return 0; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.linearize++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); rc = skb_linearize(skb); if (unlikely(rc)) { - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.linearize_failed++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, + &tx_ring->syncp); } return rc; @@ -2868,9 +2933,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring, tx_ring->push_buf_intermediate_buf); *header_len = push_len; if (unlikely(skb->data != *push_hdr)) { - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.llq_buffer_copy++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, + &tx_ring->syncp); delta = push_len - 
skb_head_len; } @@ -2927,9 +2991,8 @@ static int ena_tx_map_skb(struct ena_ring *tx_ring, return 0; error_report_dma_error: - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.dma_mapping_err++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, + &tx_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); tx_info->skb = NULL; @@ -3006,9 +3069,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) __func__, qid); netif_tx_stop_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_stop++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, + &tx_ring->syncp); /* There is a rare condition where this function decide to * stop the queue but meanwhile clean_tx_irq updates @@ -3023,9 +3085,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, ENA_TX_WAKEUP_THRESH)) { netif_tx_wake_queue(txq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.queue_wakeup++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, + &tx_ring->syncp); } } @@ -3034,9 +3095,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) * has a mb */ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.doorbells++; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, + &tx_ring->syncp); } return NETDEV_TX_OK; @@ -3242,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = { .ndo_set_mac_address = NULL, .ndo_validate_addr = eth_validate_addr, .ndo_bpf = ena_xdp, + .ndo_xdp_xmit = ena_xdp_xmit, }; static int ena_device_validate_params(struct ena_adapter *adapter, @@ -3671,9 +3732,8 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, rc = -EIO; } - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->tx_stats.missed_tx += missed_tx; - u64_stats_update_end(&tx_ring->syncp); + ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, + &tx_ring->syncp); return rc; } @@ -3756,9 +3816,8 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) rx_ring->empty_rx_queue++; if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->rx_stats.empty_rx_ring++; - u64_stats_update_end(&rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, + &rx_ring->syncp); netif_err(adapter, drv, adapter->netdev, "Trigger refill for ring %d\n", i); @@ -3788,9 +3847,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) if (unlikely(time_is_before_jiffies(keep_alive_expired))) { netif_err(adapter, drv, adapter->netdev, "Keep alive watchdog timeout.\n"); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.wd_expired++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.wd_expired, 1, + &adapter->syncp); adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } @@ -3801,9 +3859,8 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { netif_err(adapter, drv, adapter->netdev, "ENA admin queue is not in running state!\n"); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.admin_q_pause++; - 
u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, + &adapter->syncp); adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); } @@ -4176,18 +4233,36 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_dev->dmadev = &pdev->dev; + netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); + rc = -ENOMEM; + goto err_free_region; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + adapter = netdev_priv(netdev); + adapter->ena_dev = ena_dev; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + ena_dev->net_device = netdev; + + pci_set_drvdata(pdev, adapter); + rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); if (rc) { dev_err(&pdev->dev, "ENA device init failed\n"); if (rc == -ETIME) rc = -EPROBE_DEFER; - goto err_free_region; + goto err_netdev_destroy; } rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); if (rc) { dev_err(&pdev->dev, "ENA llq bar mapping failed\n"); - goto err_free_ena_dev; + goto err_device_destroy; } calc_queue_ctx.ena_dev = ena_dev; @@ -4207,26 +4282,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_device_destroy; } - /* dev zeroed in init_etherdev */ - netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues); - if (!netdev) { - dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); - rc = -ENOMEM; - goto err_device_destroy; - } - - SET_NETDEV_DEV(netdev, &pdev->dev); - - adapter = netdev_priv(netdev); - pci_set_drvdata(pdev, adapter); - - adapter->ena_dev = ena_dev; - adapter->netdev = netdev; - adapter->pdev = pdev; - ena_set_conf_feat_params(adapter, &get_feat_ctx); - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; @@ -4257,7 +4314,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { dev_err(&pdev->dev, "Failed to query interrupt moderation feature\n"); - goto err_netdev_destroy; + goto err_device_destroy; } ena_init_io_rings(adapter, 0, @@ -4335,11 +4392,11 @@ err_free_msix: ena_disable_msix(adapter); err_worker_destroy: del_timer(&adapter->timer_service); -err_netdev_destroy: - free_netdev(netdev); err_device_destroy: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); +err_netdev_destroy: + free_netdev(netdev); err_free_region: ena_release_bars(ena_dev, pdev); err_free_ena_dev: @@ -4439,9 +4496,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d) struct pci_dev *pdev = to_pci_dev(dev_d); struct ena_adapter *adapter = pci_get_drvdata(pdev); - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.suspend++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); rtnl_lock(); if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { @@ -4462,9 +4517,7 @@ static int __maybe_unused ena_resume(struct device *dev_d) struct ena_adapter *adapter = dev_get_drvdata(dev_d); int rc; - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.resume++; - u64_stats_update_end(&adapter->syncp); + ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); rtnl_lock(); rc = ena_restore_device(adapter); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h 
b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 30eb686749dc..74af15d62ee1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -170,12 +170,6 @@ struct ena_tx_buffer { * the xdp queues */ struct xdp_frame *xdpf; - /* The rx page for the rx buffer that was received in rx and - * re transmitted on xdp tx queues as a result of XDP_TX action. - * We need to free the page once we finished cleaning the buffer in - * clean_xdp_irq() - */ - struct page *xdp_rx_page; /* Indicate if bufs[0] map the linear data of the skb. */ u8 map_linear_data; @@ -239,6 +233,7 @@ struct ena_stats_rx { u64 xdp_pass; u64 xdp_tx; u64 xdp_invalid; + u64 xdp_redirect; }; struct ena_ring { @@ -263,6 +258,7 @@ struct ena_ring { struct ena_com_io_sq *ena_com_io_sq; struct bpf_prog *xdp_bpf_prog; struct xdp_rxq_info xdp_rxq; + spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */ u16 next_to_use; u16 next_to_clean; @@ -433,8 +429,8 @@ static inline bool ena_xdp_present_ring(struct ena_ring *ring) return !!ring->xdp_bpf_prog; } -static inline int ena_xdp_legal_queue_count(struct ena_adapter *adapter, - u32 queues) +static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter, + u32 queues) { return 2 * queues <= adapter->max_num_io_queues; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index bf5e0e9bd0e2..6c049864dac0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -1474,7 +1474,7 @@ int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map) for (i = 0; i < sizeof(cfg->prio_tc_map); i++) cfg->prio_tc_map[i] = cfg->tcs * i / 8; - cfg->is_qos = (tcs != 0 ? true : false); + cfg->is_qos = !!tcs; cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC); if (!cfg->is_ptp) netdev_warn(self->ndev, "%s\n", diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 926cca9a0c83..1a7148041e3d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -10,6 +10,8 @@ #ifndef AQ_NIC_H #define AQ_NIC_H +#include <linux/ethtool.h> + #include "aq_common.h" #include "aq_rss.h" #include "aq_hw.h" diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 7b80d924632a..f016f2e12ee7 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -2549,7 +2549,6 @@ static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data) */ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) { - s32 ret_val; s16 mii_autoneg_adv_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). 
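/* On the ena_xdp_legal_queue_count() change in the ena_netdev.h hunk
 * above: the predicate now returns bool, matching how it is used. Each
 * enabled channel needs a dedicated XDP TX ring on top of its regular
 * one, hence 2 * queues <= max_num_io_queues; e.g. a device exposing 32
 * IO queues supports at most 16 combined channels while an XDP program
 * is loaded (numbers illustrative).
 */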
*/ @@ -2605,12 +2604,7 @@ static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw) hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; - ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); - - if (ret_val) - return ret_val; - - return 0; + return atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); } /* diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0af0af2b70fe..4edd6f8e017e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2884,7 +2884,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (rc) return rc; - rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); if (rc < 0) return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 47b3c3127879..950ea26ae0d2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -20,6 +20,7 @@ #define DRV_VER_MIN 10 #define DRV_VER_UPD 1 +#include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/rhashtable.h> #include <linux/crash_dump.h> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 184b6d0513b2..6b7b69ed62db 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -30,14 +30,12 @@ bnxt_dl_flash_update(struct devlink *dl, return -EPERM; } - devlink_flash_update_begin_notify(dl); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); - rc = bnxt_flash_package_from_file(bp->dev, params->file_name, 0); + rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0); if (!rc) devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0); else devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0); - devlink_flash_update_end_notify(dl); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1471c9a36238..9ff79d5d14c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -2100,19 +2100,16 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); -static int bnxt_flash_nvram(struct net_device *dev, - u16 dir_type, - u16 dir_ordinal, - u16 dir_ext, - u16 dir_attr, - const u8 *data, - size_t data_len) +static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len) { struct bnxt *bp = netdev_priv(dev); int rc; struct hwrm_nvm_write_input req = {0}; dma_addr_t dma_handle; - u8 *kmem; + u8 *kmem = NULL; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); @@ -2120,26 +2117,42 @@ static int bnxt_flash_nvram(struct net_device *dev, req.dir_ordinal = cpu_to_le16(dir_ordinal); req.dir_ext = cpu_to_le16(dir_ext); req.dir_attr = cpu_to_le16(dir_attr); - req.dir_data_length = cpu_to_le32(data_len); + req.dir_item_length = cpu_to_le32(dir_item_len); + if (data_len && data) { + req.dir_data_length = cpu_to_le32(data_len); - kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, - GFP_KERNEL); - if (!kmem) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)data_len); - return -ENOMEM; + kmem = dma_alloc_coherent(&bp->pdev->dev, 
data_len, &dma_handle, + GFP_KERNEL); + if (!kmem) + return -ENOMEM; + + memcpy(kmem, data, data_len); + req.host_src_addr = cpu_to_le64(dma_handle); } - memcpy(kmem, data, data_len); - req.host_src_addr = cpu_to_le64(dma_handle); - rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); + if (kmem) + dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } +static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + const u8 *data, size_t data_len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, + 0, data, data_len); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, u8 self_reset, u8 flags) { @@ -2419,104 +2432,166 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, return rc; } -int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, - u32 install_type) +#define BNXT_PKG_DMA_SIZE 0x40000 +#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) +#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) + +int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, + u32 install_type) { - struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; - const struct firmware *fw; + struct hwrm_nvm_install_update_output resp = {0}; + struct hwrm_nvm_modify_input modify = {0}; + struct bnxt *bp = netdev_priv(dev); + bool defrag_attempted = false; + dma_addr_t dma_handle; + u8 *kmem = NULL; + u32 modify_len; u32 item_len; int rc = 0; u16 index; bnxt_hwrm_fw_set_time(bp); - rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, &item_len, NULL); - if (rc) { - netdev_err(dev, "PKG update area not created in nvram\n"); - return rc; - } + bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); - rc = request_firmware(&fw, filename, &dev->dev); - if (rc != 0) { - netdev_err(dev, "PKG error %d requesting file: %s\n", - rc, filename); - return rc; + /* Try allocating a large DMA buffer first. Older fw will + * cause excessive NVRAM erases when using small blocks. 
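The allocation loop that follows implements the comment above: request the largest coherent buffer first and halve the request on every failed allocation, bottoming out at one page, so older firmware sees as few erase cycles as possible. A generic sketch of the same fallback idiom, assuming any power-of-two size down to PAGE_SIZE is usable:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Grab the biggest coherent DMA buffer obtainable between max_len and
 * PAGE_SIZE; *lenp reports the size actually granted. Returns NULL only
 * if even a single page cannot be allocated.
 */
static void *my_alloc_dma_fallback(struct device *dev, u32 max_len,
				   u32 *lenp, dma_addr_t *mapping)
{
	u32 len = max_len;
	void *buf;

	while (1) {
		buf = dma_alloc_coherent(dev, len, mapping, GFP_KERNEL);
		if (buf || len <= PAGE_SIZE)
			break;
		len /= 2;
	}
	*lenp = len;
	return buf;
}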
+ */ + modify_len = roundup_pow_of_two(fw->size); + modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); + while (1) { + kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, + &dma_handle, GFP_KERNEL); + if (!kmem && modify_len > PAGE_SIZE) + modify_len /= 2; + else + break; } + if (!kmem) + return -ENOMEM; - if (fw->size > item_len) { - netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", - (unsigned long)fw->size); - rc = -EFBIG; - } else { - dma_addr_t dma_handle; - u8 *kmem; - struct hwrm_nvm_modify_input modify = {0}; + modify.host_src_addr = cpu_to_le64(dma_handle); - bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); + if ((install_type & 0xffff) == 0) + install_type >>= 16; + install.install_type = cpu_to_le32(install_type); + + do { + u32 copied = 0, len = modify_len; + + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + BNX_DIR_EXT_NONE, + &index, &item_len, NULL); + if (rc) { + netdev_err(dev, "PKG update area not created in nvram\n"); + break; + } + if (fw->size > item_len) { + netdev_err(dev, "PKG insufficient update area in nvram: %lu\n", + (unsigned long)fw->size); + rc = -EFBIG; + break; + } modify.dir_idx = cpu_to_le16(index); - modify.len = cpu_to_le32(fw->size); - kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size, - &dma_handle, GFP_KERNEL); - if (!kmem) { - netdev_err(dev, - "dma_alloc_coherent failure, length = %u\n", - (unsigned int)fw->size); - rc = -ENOMEM; - } else { - memcpy(kmem, fw->data, fw->size); - modify.host_src_addr = cpu_to_le64(dma_handle); + if (fw->size > modify_len) + modify.flags = BNXT_NVM_MORE_FLAG; + while (copied < fw->size) { + u32 balance = fw->size - copied; + if (balance <= modify_len) { + len = balance; + if (copied) + modify.flags |= BNXT_NVM_LAST_FLAG; + } + memcpy(kmem, fw->data + copied, len); + modify.len = cpu_to_le32(len); + modify.offset = cpu_to_le32(copied); rc = hwrm_send_message(bp, &modify, sizeof(modify), FLASH_PACKAGE_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, fw->size, kmem, - dma_handle); + if (rc) + goto pkg_abort; + copied += len; } - } - release_firmware(fw); - if (rc) - goto err_exit; - - if ((install_type & 0xffff) == 0) - install_type >>= 16; - bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); - install.install_type = cpu_to_le32(install_type); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message_silent(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; + if (defrag_attempted) { + /* We have tried to defragment already in the previous + * iteration. 
Return with the result for INSTALL_UPDATE + */ + mutex_unlock(&bp->hwrm_cmd_lock); + break; + } - if (resp->error_code && error_code == + if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { - install.flags |= cpu_to_le16( - NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); + install.flags |= + cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); + + rc = _hwrm_send_message_silent(bp, &install, + sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + + if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { + /* FW has cleared NVM area, driver will create + * UPDATE directory and try the flash again + */ + defrag_attempted = true; + rc = __bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, + 0); + } else if (rc) { + netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); + } + } else if (rc) { + netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } - if (rc) - goto flash_pkg_exit; - } + mutex_unlock(&bp->hwrm_cmd_lock); + } while (defrag_attempted && !rc); - if (resp->result) { +pkg_abort: + dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); + if (resp.result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", - (s8)resp->result, (int)resp->problem_item); + (s8)resp.result, (int)resp.problem_item); rc = -ENOPKG; } -flash_pkg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); -err_exit: if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } +static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, + u32 install_type) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "PKG error %d requesting file: %s\n", + rc, filename); + return rc; + } + + rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type); + + release_firmware(fw); + + return rc; +} + static int bnxt_flash_device(struct net_device *dev, struct ethtool_flash *flash) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index fa6fbde52bea..0a57cb6a4a4b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -94,8 +94,8 @@ u32 bnxt_fw_to_ethtool_speed(u16); u16 bnxt_get_fw_auto_link_speeds(u32); int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info); -int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, - u32 install_type); +int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, + u32 install_type); void bnxt_ethtool_init(struct bnxt *bp); void bnxt_ethtool_free(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 23b80aa171dd..a217316228f4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -8,6 +8,7 @@ * the Free Software Foundation. 
*/ +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index f335b7115c1b..dc34e38f97c7 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -218,17 +218,17 @@ do { \ /* Set the coalescing timer for the given ib */ #define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \ - ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)); + ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)) /* Acks 'events' # of events for a given ib while disabling interrupts */ #define bna_ib_ack_disable_irq(_i_dbell, _events) \ - (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ - (_i_dbell)->doorbell_addr)); + (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ + (_i_dbell)->doorbell_addr)) /* Acks 'events' # of events for a given ib */ #define bna_ib_ack(_i_dbell, _events) \ - (writel(((_i_dbell)->doorbell_ack | (_events)), \ - (_i_dbell)->doorbell_addr)); + (writel(((_i_dbell)->doorbell_ack | (_events)), \ + (_i_dbell)->doorbell_addr)) #define bna_ib_start(_bna, _ib, _is_regular) \ { \ @@ -259,12 +259,12 @@ do { \ } #define bna_txq_prod_indx_doorbell(_tcb) \ - (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ - (_tcb)->q_dbell)); + (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ + (_tcb)->q_dbell)) #define bna_rxq_prod_indx_doorbell(_rcb) \ - (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ - (_rcb)->q_dbell)); + (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ + (_rcb)->q_dbell)) /* TxQ, RxQ, CQ related bits, offsets, macros */ diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5de47f6fde5a..d8c68906525a 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -77,10 +77,12 @@ #define MACB_RBQPH 0x04D4 /* GEM register offsets. */ +#define GEM_NCR 0x0000 /* Network Control */ #define GEM_NCFGR 0x0004 /* Network Config */ #define GEM_USRIO 0x000c /* User IO */ #define GEM_DMACFG 0x0010 /* DMA Configuration */ #define GEM_JML 0x0048 /* Jumbo Max Length */ +#define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */ #define GEM_HRB 0x0080 /* Hash Bottom */ #define GEM_HRT 0x0084 /* Hash Top */ #define GEM_SA1B 0x0088 /* Specific1 Bottom */ @@ -166,6 +168,9 @@ #define GEM_DCFG7 0x0298 /* Design Config 7 */ #define GEM_DCFG8 0x029C /* Design Config 8 */ #define GEM_DCFG10 0x02A4 /* Design Config 10 */ +#define GEM_DCFG12 0x02AC /* Design Config 12 */ +#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */ +#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */ #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ @@ -272,11 +277,19 @@ #define MACB_IRXFCS_OFFSET 19 #define MACB_IRXFCS_SIZE 1 +/* GEM specific NCR bitfields. */ +#define GEM_ENABLE_HS_MAC_OFFSET 31 +#define GEM_ENABLE_HS_MAC_SIZE 1 + /* GEM specific NCFGR bitfields. 
*/ +#define GEM_FD_OFFSET 1 /* Full duplex */ +#define GEM_FD_SIZE 1 #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ #define GEM_GBE_SIZE 1 #define GEM_PCSSEL_OFFSET 11 #define GEM_PCSSEL_SIZE 1 +#define GEM_PAE_OFFSET 13 /* Pause enable */ +#define GEM_PAE_SIZE 1 #define GEM_CLK_OFFSET 18 /* MDC clock division */ #define GEM_CLK_SIZE 3 #define GEM_DBW_OFFSET 21 /* Data bus width */ @@ -461,11 +474,17 @@ #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 +/* Bitfield in HS_MAC_CONFIG */ +#define GEM_HS_MAC_SPEED_OFFSET 0 +#define GEM_HS_MAC_SPEED_SIZE 3 + /* Bitfields in DCFG1. */ #define GEM_IRQCOR_OFFSET 23 #define GEM_IRQCOR_SIZE 1 #define GEM_DBWDEF_OFFSET 25 #define GEM_DBWDEF_SIZE 3 +#define GEM_NO_PCS_OFFSET 0 +#define GEM_NO_PCS_SIZE 1 /* Bitfields in DCFG2. */ #define GEM_RX_PKT_BUFF_OFFSET 20 @@ -500,6 +519,28 @@ #define GEM_RXBD_RDBUFF_OFFSET 8 #define GEM_RXBD_RDBUFF_SIZE 4 +/* Bitfields in DCFG12. */ +#define GEM_HIGH_SPEED_OFFSET 26 +#define GEM_HIGH_SPEED_SIZE 1 + +/* Bitfields in USX_CONTROL. */ +#define GEM_USX_CTRL_SPEED_OFFSET 14 +#define GEM_USX_CTRL_SPEED_SIZE 3 +#define GEM_SERDES_RATE_OFFSET 12 +#define GEM_SERDES_RATE_SIZE 2 +#define GEM_RX_SCR_BYPASS_OFFSET 9 +#define GEM_RX_SCR_BYPASS_SIZE 1 +#define GEM_TX_SCR_BYPASS_OFFSET 8 +#define GEM_TX_SCR_BYPASS_SIZE 1 +#define GEM_TX_EN_OFFSET 1 +#define GEM_TX_EN_SIZE 1 +#define GEM_SIGNAL_OK_OFFSET 0 +#define GEM_SIGNAL_OK_SIZE 1 + +/* Bitfields in USX_STATUS. */ +#define GEM_USX_BLOCK_LOCK_OFFSET 0 +#define GEM_USX_BLOCK_LOCK_SIZE 1 + /* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 #define GEM_SUBNSINCRL_OFFSET 24 @@ -658,11 +699,14 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_CLK_HW_CHG 0x04000000 #define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 #define MACB_CAPS_MACB_IS_GEM 0x80000000 +#define MACB_CAPS_PCS 0x01000000 +#define MACB_CAPS_HIGH_SPEED 0x02000000 /* LSO settings */ #define MACB_LSO_UFO_ENABLE 0x01 @@ -1104,6 +1148,14 @@ struct macb_pm_data { u32 usrio; }; +struct macb_usrio_config { + u32 mii; + u32 rmii; + u32 rgmii; + u32 refclk; + u32 hdfctlen; +}; + struct macb_config { u32 caps; unsigned int dma_burst_length; @@ -1112,6 +1164,7 @@ struct macb_config { struct clk **rx_clk, struct clk **tsu_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; + const struct macb_usrio_config *usrio; }; struct tsu_incr { @@ -1201,6 +1254,7 @@ struct macb { struct mii_bus *mii_bus; struct phylink *phylink; struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; u32 caps; unsigned int dma_burst_length; @@ -1209,8 +1263,6 @@ struct macb { /* AT91RM9200 transmit queue (1 on wire + 1 queued) */ struct macb_tx_skb rm9200_txq[2]; - unsigned int rm9200_tx_tail; - unsigned int rm9200_tx_len; unsigned int max_tx_length; u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES]; @@ -1244,6 +1296,7 @@ struct macb { u32 rx_intr_mask; struct macb_pm_data pm_data; + const struct macb_usrio_config *usrio; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 286f0341bdf8..d5d910916c2e 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -84,6 +84,9 @@ struct sifive_fu540_macb_mgmt { #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 
0) #define MACB_WOL_ENABLED (0x1 << 1) +#define HS_SPEED_10000M 4 +#define MACB_SERDES_RATE_10G 1 + /* Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) */ @@ -457,15 +460,14 @@ static void macb_init_buffers(struct macb *bp) /** * macb_set_tx_clk() - Set a clock to a new frequency - * @clk: Pointer to the clock to change + * @bp: pointer to struct macb * @speed: New frequency in Hz - * @dev: Pointer to the struct net_device */ -static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) +static void macb_set_tx_clk(struct macb *bp, int speed) { long ferr, rate, rate_rounded; - if (!clk) + if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG)) return; switch (speed) { @@ -482,7 +484,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) return; } - rate_rounded = clk_round_rate(clk, rate); + rate_rounded = clk_round_rate(bp->tx_clk, rate); if (rate_rounded < 0) return; @@ -492,11 +494,12 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) ferr = abs(rate_rounded - rate); ferr = DIV_ROUND_UP(ferr, rate / 100000); if (ferr > 5) - netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", + netdev_warn(bp->dev, + "unable to generate target frequency: %ld Hz\n", rate); - if (clk_set_rate(clk, rate_rounded)) - netdev_err(dev, "adjusting tx_clk failed.\n"); + if (clk_set_rate(bp->tx_clk, rate_rounded)) + netdev_err(bp->dev, "adjusting tx_clk failed.\n"); } static void macb_validate(struct phylink_config *config, @@ -513,6 +516,7 @@ static void macb_validate(struct phylink_config *config, state->interface != PHY_INTERFACE_MODE_RMII && state->interface != PHY_INTERFACE_MODE_GMII && state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_10GBASER && !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); return; @@ -525,10 +529,31 @@ static void macb_validate(struct phylink_config *config, return; } + if (state->interface == PHY_INTERFACE_MODE_10GBASER && + !(bp->caps & MACB_CAPS_HIGH_SPEED && + bp->caps & MACB_CAPS_PCS)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Asym_Pause); + if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && + (state->interface == PHY_INTERFACE_MODE_NA || + state->interface == PHY_INTERFACE_MODE_10GBASER)) { + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseT_Full); + if (state->interface != PHY_INTERFACE_MODE_NA) + goto out; + } + phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); phylink_set(mask, 100baseT_Half); @@ -545,23 +570,90 @@ static void macb_validate(struct phylink_config *config, if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) phylink_set(mask, 1000baseT_Half); } - +out: bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(state->advertising, state->advertising, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static void macb_mac_pcs_get_state(struct phylink_config *config, +static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, + int duplex) +{ + struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + u32 config; 
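/* Aside, not part of the patch: GEM_BFINS() used just below is macb's
 * read-modify-write bitfield insert. A generic sketch of the pattern,
 * with illustrative names rather than the driver's real macros:
 */
#define MY_FIELD_OFFSET	14
#define MY_FIELD_SIZE	3
#define MY_BF(v)	(((v) & ((1u << MY_FIELD_SIZE) - 1)) << MY_FIELD_OFFSET)
#define MY_BFINS(v, old)	(((old) & ~MY_BF(~0u)) | MY_BF(v))
/* i.e. mask the field out of the old register value, then OR in the new
 * field value, exactly how SERDES_RATE and USX_CTRL_SPEED are set below.
 */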
+ + config = gem_readl(bp, USX_CONTROL); + config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config); + config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config); + config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS)); + config |= GEM_BIT(TX_EN); + gem_writel(bp, USX_CONTROL, config); +} + +static void macb_usx_pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { + struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = gem_readl(bp, USX_STATUS); + state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK)); + val = gem_readl(bp, NCFGR); + if (val & GEM_BIT(PAE)) + state->pause = MLO_PAUSE_RX; +} + +static int macb_usx_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct macb *bp = container_of(pcs, struct macb, phylink_pcs); + + gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | + GEM_BIT(SIGNAL_OK)); + + return 0; +} + +static void macb_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ state->link = 0; } -static void macb_mac_an_restart(struct phylink_config *config) +static void macb_pcs_an_restart(struct phylink_pcs *pcs) { /* Not supported */ } +static int macb_pcs_config(struct phylink_pcs *pcs, + unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = { + .pcs_get_state = macb_usx_pcs_get_state, + .pcs_config = macb_usx_pcs_config, + .pcs_link_up = macb_usx_pcs_link_up, +}; + +static const struct phylink_pcs_ops macb_phylink_pcs_ops = { + .pcs_get_state = macb_pcs_get_state, + .pcs_an_restart = macb_pcs_an_restart, + .pcs_config = macb_pcs_config, +}; + static void macb_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { @@ -569,25 +661,35 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, struct macb *bp = netdev_priv(ndev); unsigned long flags; u32 old_ctrl, ctrl; + u32 old_ncr, ncr; spin_lock_irqsave(&bp->lock, flags); old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); + old_ncr = ncr = macb_or_gem_readl(bp, NCR); if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { if (state->interface == PHY_INTERFACE_MODE_RMII) ctrl |= MACB_BIT(RM9200_RMII); } else if (macb_is_gem(bp)) { ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); + ncr &= ~GEM_BIT(ENABLE_HS_MAC); - if (state->interface == PHY_INTERFACE_MODE_SGMII) + if (state->interface == PHY_INTERFACE_MODE_SGMII) { ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); + } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { + ctrl |= GEM_BIT(PCSSEL); + ncr |= GEM_BIT(ENABLE_HS_MAC); + } } /* Apply the new configuration, if any */ if (old_ctrl ^ ctrl) macb_or_gem_writel(bp, NCFGR, ctrl); + if (old_ncr ^ ncr) + macb_or_gem_writel(bp, NCR, ncr); + spin_unlock_irqrestore(&bp->lock, flags); } @@ -649,7 +751,7 @@ static void macb_mac_link_up(struct phylink_config *config, if (rx_pause) ctrl |= MACB_BIT(PAE); - macb_set_tx_clk(bp->tx_clk, speed, ndev); + macb_set_tx_clk(bp, speed); /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down * cleared the pipeline and control registers. 
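The hunks above are the heart of the phylink conversion: rather than implementing mac_pcs_get_state() and mac_an_restart() in the MAC ops, the driver now exposes struct phylink_pcs instances whose ops phylink invokes directly. A minimal standalone PCS using the same ops signatures the patch uses; my_pcs and its status register are hypothetical:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/phylink.h>

struct my_pcs {
	struct phylink_pcs pcs;
	void __iomem *regs;
};

static struct my_pcs *to_my_pcs(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct my_pcs, pcs);
}

static void my_pcs_get_state(struct phylink_pcs *pcs,
			     struct phylink_link_state *state)
{
	struct my_pcs *mp = to_my_pcs(pcs);

	/* Hypothetical status register with link state in bit 0. */
	state->link = !!(readl(mp->regs) & BIT(0));
}

static int my_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			 phy_interface_t interface,
			 const unsigned long *advertising,
			 bool permit_pause_to_mac)
{
	/* A real PCS would program its advertisement here. */
	return 0;
}

static const struct phylink_pcs_ops my_pcs_ops = {
	.pcs_get_state	= my_pcs_get_state,
	.pcs_config	= my_pcs_config,
};

As in the patch, the ops pointer can be chosen per interface mode from mac_prepare() before phylink_set_pcs() attaches the instance.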
@@ -664,6 +766,10 @@ static void macb_mac_link_up(struct phylink_config *config, macb_or_gem_writel(bp, NCFGR, ctrl); + if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) + gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, + gem_readl(bp, HS_MAC_CONFIG))); + spin_unlock_irqrestore(&bp->lock, flags); /* Enable Rx and Tx */ @@ -672,10 +778,28 @@ static void macb_mac_link_up(struct phylink_config *config, netif_tx_wake_all_queues(ndev); } +static int macb_mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct macb *bp = netdev_priv(ndev); + + if (interface == PHY_INTERFACE_MODE_10GBASER) + bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops; + else if (interface == PHY_INTERFACE_MODE_SGMII) + bp->phylink_pcs.ops = &macb_phylink_pcs_ops; + else + bp->phylink_pcs.ops = NULL; + + if (bp->phylink_pcs.ops) + phylink_set_pcs(bp->phylink, &bp->phylink_pcs); + + return 0; +} + static const struct phylink_mac_ops macb_phylink_ops = { .validate = macb_validate, - .mac_pcs_get_state = macb_mac_pcs_get_state, - .mac_an_restart = macb_mac_an_restart, + .mac_prepare = macb_mac_prepare, .mac_config = macb_mac_config, .mac_link_down = macb_mac_link_down, .mac_link_up = macb_mac_link_up, @@ -3524,6 +3648,11 @@ static void macb_configure_caps(struct macb *bp, dcfg = gem_readl(bp, DCFG1); if (GEM_BFEXT(IRQCOR, dcfg) == 0) bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; + if (GEM_BFEXT(NO_PCS, dcfg) == 0) + bp->caps |= MACB_CAPS_PCS; + dcfg = gem_readl(bp, DCFG12); + if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1) + bp->caps |= MACB_CAPS_HIGH_SPEED; dcfg = gem_readl(bp, DCFG2); if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0) bp->caps |= MACB_CAPS_FIFO_MODE; @@ -3565,6 +3694,20 @@ static void macb_probe_queues(void __iomem *mem, *num_queues = hweight32(*queue_mask); } +static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, + struct clk *rx_clk, struct clk *tsu_clk) +{ + struct clk_bulk_data clks[] = { + { .clk = tsu_clk, }, + { .clk = rx_clk, }, + { .clk = pclk, }, + { .clk = hclk, }, + { .clk = tx_clk }, + }; + + clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks); +} + static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, struct clk **rx_clk, struct clk **tsu_clk) @@ -3582,19 +3725,13 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, } if (IS_ERR_OR_NULL(*pclk)) { - err = PTR_ERR(*pclk); - if (!err) - err = -ENODEV; - + err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV; dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); return err; } if (IS_ERR_OR_NULL(*hclk)) { - err = PTR_ERR(*hclk); - if (!err) - err = -ENODEV; - + err = IS_ERR(*hclk) ? 
PTR_ERR(*hclk) : -ENODEV; dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); return err; } @@ -3790,15 +3927,15 @@ static int macb_init(struct platform_device *pdev) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { val = 0; if (phy_interface_mode_is_rgmii(bp->phy_interface)) - val = GEM_BIT(RGMII); + val = bp->usrio->rgmii; else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) - val = MACB_BIT(RMII); + val = bp->usrio->rmii; else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) - val = MACB_BIT(MII); + val = bp->usrio->mii; if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) - val |= MACB_BIT(CLKEN); + val |= bp->usrio->refclk; macb_or_gem_writel(bp, USRIO, val); } @@ -3909,7 +4046,6 @@ static int at91ether_start(struct macb *lp) MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | - MACB_BIT(RM9200_TBRE) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); @@ -3926,7 +4062,6 @@ static void at91ether_stop(struct macb *lp) MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TCOMP) | - MACB_BIT(RM9200_TBRE) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP)); @@ -3996,10 +4131,11 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macb *lp = netdev_priv(dev); - unsigned long flags; - if (lp->rm9200_tx_len < 2) { - int desc = lp->rm9200_tx_tail; + if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) { + int desc = 0; + + netif_stop_queue(dev); /* Store packet information (to free when Tx completed) */ lp->rm9200_txq[desc].skb = skb; @@ -4013,15 +4149,6 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&lp->lock, flags); - - lp->rm9200_tx_tail = (desc + 1) & 1; - lp->rm9200_tx_len++; - if (lp->rm9200_tx_len > 1) - netif_stop_queue(dev); - - spin_unlock_irqrestore(&lp->lock, flags); - /* Set address of the data in the Transmit Address register */ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping); /* Set length of the packet in the Transmit Control register */ @@ -4087,8 +4214,6 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) struct macb *lp = netdev_priv(dev); u32 intstatus, ctl; unsigned int desc; - unsigned int qlen; - u32 tsr; /* MAC Interrupt Status register indicates what interrupts are pending. * It is automatically cleared once read. @@ -4100,39 +4225,21 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) at91ether_rx(dev); /* Transmit complete */ - if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) { + if (intstatus & MACB_BIT(TCOMP)) { /* The TCOM bit is set even if the transmission failed */ if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) dev->stats.tx_errors++; - spin_lock(&lp->lock); - - tsr = macb_readl(lp, TSR); - - /* we have three possibilities here: - * - all pending packets transmitted (TGO, implies BNQ) - * - only first packet transmitted (!TGO && BNQ) - * - two frames pending (!TGO && !BNQ) - * Note that TGO ("transmit go") is called "IDLE" on RM9200. - */ - qlen = (tsr & MACB_BIT(TGO)) ? 0 : - (tsr & MACB_BIT(RM9200_BNQ)) ? 
1 : 2; - - while (lp->rm9200_tx_len > qlen) { - desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1; + desc = 0; + if (lp->rm9200_txq[desc].skb) { dev_consume_skb_irq(lp->rm9200_txq[desc].skb); lp->rm9200_txq[desc].skb = NULL; dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping, lp->rm9200_txq[desc].size, DMA_TO_DEVICE); dev->stats.tx_packets++; dev->stats.tx_bytes += lp->rm9200_txq[desc].size; - lp->rm9200_tx_len--; } - - if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev)) - netif_wake_queue(dev); - - spin_unlock(&lp->lock); + netif_wake_queue(dev); } /* Work-around for EMAC Errata section 41.3.1 */ @@ -4283,8 +4390,10 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, return err; mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); - if (!mgmt) - return -ENOMEM; + if (!mgmt) { + err = -ENOMEM; + goto err_disable_clks; + } init.name = "sifive-gemgxl-mgmt"; init.ops = &fu540_c000_ops; @@ -4295,16 +4404,26 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk, mgmt->hw.init = &init; *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw); - if (IS_ERR(*tx_clk)) - return PTR_ERR(*tx_clk); + if (IS_ERR(*tx_clk)) { + err = PTR_ERR(*tx_clk); + goto err_disable_clks; + } err = clk_prepare_enable(*tx_clk); - if (err) + if (err) { dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); - else + *tx_clk = NULL; + goto err_disable_clks; + } else { dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name); + } return 0; + +err_disable_clks: + macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk); + + return err; } static int fu540_c000_init(struct platform_device *pdev) @@ -4316,6 +4435,21 @@ static int fu540_c000_init(struct platform_device *pdev) return macb_init(pdev); } +static const struct macb_usrio_config macb_default_usrio = { + .mii = MACB_BIT(MII), + .rmii = MACB_BIT(RMII), + .rgmii = GEM_BIT(RGMII), + .refclk = MACB_BIT(CLKEN), +}; + +static const struct macb_usrio_config sama7g5_usrio = { + .mii = 0, + .rmii = 1, + .rgmii = 2, + .refclk = BIT(2), + .hdfctlen = BIT(6), +}; + static const struct macb_config fu540_c000_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | MACB_CAPS_GEM_HAS_PTP, @@ -4323,12 +4457,14 @@ static const struct macb_config fu540_c000_config = { .clk_init = fu540_c000_clk_init, .init = fu540_c000_init, .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config at91sam9260_config = { .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d3macb_config = { @@ -4336,6 +4472,7 @@ static const struct macb_config sama5d3macb_config = { | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config pc302gem_config = { @@ -4343,6 +4480,7 @@ static const struct macb_config pc302gem_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d2_config = { @@ -4350,6 +4488,7 @@ static const struct macb_config sama5d2_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d3_config = { @@ -4359,6 +4498,7 @@ static const struct macb_config sama5d3_config = { .clk_init = macb_clk_init, .init = macb_init, 
.jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config sama5d4_config = { @@ -4366,18 +4506,21 @@ static const struct macb_config sama5d4_config = { .dma_burst_length = 4, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config emac_config = { .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC, .clk_init = at91ether_clk_init, .init = at91ether_init, + .usrio = &macb_default_usrio, }; static const struct macb_config np4_config = { .caps = MACB_CAPS_USRIO_DISABLED, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, }; static const struct macb_config zynqmp_config = { @@ -4388,6 +4531,7 @@ static const struct macb_config zynqmp_config = { .clk_init = macb_clk_init, .init = macb_init, .jumbo_max_len = 10240, + .usrio = &macb_default_usrio, }; static const struct macb_config zynq_config = { @@ -4396,6 +4540,23 @@ static const struct macb_config zynq_config = { .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, + .usrio = &macb_default_usrio, +}; + +static const struct macb_config sama7g5_gem_config = { + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &sama7g5_usrio, +}; + +static const struct macb_config sama7g5_emac_config = { + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &sama7g5_usrio, }; static const struct of_device_id macb_dt_ids[] = { @@ -4415,6 +4576,8 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, { .compatible = "cdns,zynq-gem", .data = &zynq_config }, { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, + { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config }, + { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); @@ -4517,6 +4680,8 @@ static int macb_probe(struct platform_device *pdev) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + bp->usrio = macb_config->usrio; + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -4612,11 +4777,7 @@ err_out_free_netdev: free_netdev(dev); err_disable_clocks: - clk_disable_unprepare(tx_clk); - clk_disable_unprepare(hclk); - clk_disable_unprepare(pclk); - clk_disable_unprepare(rx_clk); - clk_disable_unprepare(tsu_clk); + macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); @@ -4641,11 +4802,8 @@ static int macb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); if (!pm_runtime_suspended(&pdev->dev)) { - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); - clk_disable_unprepare(bp->tsu_clk); + macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, + bp->rx_clk, bp->tsu_clk); pm_runtime_set_suspended(&pdev->dev); } phylink_destroy(bp->phylink); @@ -4824,13 +4982,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); - if (!(device_may_wakeup(dev))) { - 
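One property the runtime-suspend rewrite below depends on: the clk core treats a NULL struct clk pointer as a no-op, so a single macb_clks_disable() helper covers both the full shutdown and the wakeup case where only tsu_clk is stopped. A small sketch of the clk_bulk idiom, with an illustrative helper name:

#include <linux/clk.h>
#include <linux/kernel.h>

/* Disable any subset of two clocks in one call; NULL entries are
 * skipped by the clk core, so callers pass NULL for clocks that
 * must keep running.
 */
static void my_clks_disable(struct clk *a, struct clk *b)
{
	struct clk_bulk_data clks[] = {
		{ .clk = a },
		{ .clk = b },
	};

	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
}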
clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); - } - clk_disable_unprepare(bp->tsu_clk); + if (!(device_may_wakeup(dev))) + macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); + else + macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c index 2a6d1cadac9e..30254e4cf70f 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -27,7 +27,6 @@ #include "cn66xx_device.h" #include "cn68xx_device.h" #include "cn68xx_regs.h" -#include "cn68xx_device.h" static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 16eebfc52109..66f2c553370c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -15,6 +15,7 @@ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more details. ***********************************************************************/ +#include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/net_tstamp.h> #include <linux/pci.h> diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index c7bdac79299a..2f218fbfed06 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -5,6 +5,7 @@ /* ETHTOOL Support for VNIC_VF Device*/ +#include <linux/ethtool.h> #include <linux/pci.h> #include <linux/net_tstamp.h> diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 7a141ce32e86..f782e6af45e9 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -770,7 +770,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, rq->caching = 1; /* Driver have no proper error path for failed XDP RX-queue info reg */ - WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0); + WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0); /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 27308600da15..8e681ce72d62 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -39,6 +39,7 @@ #include <linux/bitops.h> #include <linux/cache.h> +#include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 17410fe86626..7d49fd4edc9e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2671,7 +2671,7 @@ do { \ seq_printf(seq, "%-12s", s); \ for (i = 0; i < n; ++i) \ seq_printf(seq, " %16" fmt_spec, v); \ - seq_putc(seq, '\n'); \ + seq_putc(seq, '\n'); \ } while (0) #define S(s, v) S3("s", s, v) #define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c 
b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index cd8f9a481d73..d546993bda09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -33,6 +33,7 @@ * SOFTWARE. */ +#include <linux/ethtool.h> #include <linux/pci.h> #include "t4vf_common.h" diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index 65617752c630..72bb123d53db 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -236,6 +236,7 @@ struct chtls_dev { struct list_head na_node; unsigned int send_page_order; int max_host_sndbuf; + u32 round_robin_cnt; struct key_map kmap; unsigned int cdev_state; }; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 50e3a70e5a29..95ab871d8d59 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1218,8 +1218,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->sndbuf = csk->snd_win; csk->ulp_mode = ULP_MODE_TLS; step = cdev->lldi->nrxq / cdev->lldi->nchan; - csk->rss_qid = cdev->lldi->rxq_ids[port_id * step]; rxq_idx = port_id * step; + rxq_idx += cdev->round_robin_cnt++ % step; + csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx]; csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx : port_id * step; csk->sndbuf = newsk->sk_sndbuf; diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index 9c682aff3834..519323460f26 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -36,8 +36,6 @@ void vnic_cq_free(struct vnic_cq *cq) int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, unsigned int desc_count, unsigned int desc_size) { - int err; - cq->index = index; cq->vdev = vdev; @@ -47,11 +45,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, return -EINVAL; } - err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); - if (err) - return err; - - return 0; + return vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); } void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig index 76247103e566..7af86b6d4150 100644 --- a/drivers/net/ethernet/davicom/Kconfig +++ b/drivers/net/ethernet/davicom/Kconfig @@ -5,7 +5,7 @@ config DM9000 tristate "DM9000 support" - depends on ARM || MIPS || COLDFIRE || NIOS2 + depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST select CRC32 select MII help diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 5c6c8c5ec747..3fdc70dab5c1 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -232,32 +232,29 @@ static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count) static void dm9000_dumpblk_8bit(void __iomem *reg, int count) { int i; - int tmp; for (i = 0; i < count; i++) - tmp = readb(reg); + readb(reg); } static void dm9000_dumpblk_16bit(void __iomem *reg, int count) { int i; - int tmp; count = (count + 1) >> 1; for (i = 0; i < count; i++) - tmp = readw(reg); + readw(reg); } static void dm9000_dumpblk_32bit(void __iomem *reg, int count) { int i; - int tmp; count = (count + 3) >> 2; for 
(i = 0; i < count; i++) - tmp = readl(reg); + readl(reg); } /* diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index d9f6c19940ef..c3cbe55205a7 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2175,11 +2175,21 @@ out: static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume); +static void de_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + rtnl_lock(); + dev_close(dev); + rtnl_unlock(); +} + static struct pci_driver de_driver = { .name = DRV_NAME, .id_table = de_pci_tbl, .probe = de_init_one, .remove = de_remove_one, + .shutdown = de_shutdown, .driver.pm = &de_pm_ops, }; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index e7b0d7de40fd..c1dcd6ca1457 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1293,7 +1293,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static unsigned char last_phys_addr[ETH_ALEN] = { 0x00, 'L', 'i', 'n', 'u', 'x' }; +#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ static int last_irq; +#endif int i, irq; unsigned short sum; unsigned char *ee_data; @@ -1617,7 +1619,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 6; i++) last_phys_addr[i] = dev->dev_addr[i]; +#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ last_irq = irq; +#endif /* The lower four bits are the media type. */ if (board_idx >= 0 && board_idx < MAX_UNITS) { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 676e437d78f6..d402d83d9edd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4677,7 +4677,6 @@ static int be_if_create(struct be_adapter *adapter) { u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; u32 cap_flags = be_if_cap_flags(adapter); - int status; /* alloc required memory for other filtering fields */ adapter->pmac_id = kcalloc(be_max_uc(adapter), @@ -4700,13 +4699,8 @@ static int be_if_create(struct be_adapter *adapter) en_flags &= cap_flags; /* will enable all the needed filter flags in be_open() */ - status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + return be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, &adapter->if_handle, 0); - - if (status) - return status; - - return 0; } int be_update_queues(struct be_adapter *adapter) diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 80fb1f537bb3..88bfe2107938 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1044,10 +1044,39 @@ static void ftgmac100_adjust_link(struct net_device *netdev) schedule_work(&priv->reset_task); } -static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) +static int ftgmac100_mii_probe(struct net_device *netdev) { - struct net_device *netdev = priv->netdev; + struct ftgmac100 *priv = netdev_priv(netdev); + struct platform_device *pdev = to_platform_device(priv->dev); + struct device_node *np = pdev->dev.of_node; struct phy_device *phydev; + phy_interface_t phy_intf; + int err; + + /* Default to RGMII. 
It's a gigabit part after all */ + err = of_get_phy_mode(np, &phy_intf); + if (err) + phy_intf = PHY_INTERFACE_MODE_RGMII; + + /* Aspeed only supports these. I don't know about other IP + * block vendors so I'm going to just let them through for + * now. Note that this is only a warning if for some obscure + * reason the DT really means to lie about it or it's a newer + * part we don't know about. + * + * On the Aspeed SoC there are additionally straps and SCU + * control bits that could tell us what the interface is + * (or allow us to configure it while the IP block is held + * in reset). For now I chose to keep this driver away from + * those SoC specific bits and assume the device-tree is + * right and the SCU has been configured properly by pinmux + * or the firmware. + */ + if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) { + netdev_warn(netdev, + "Unsupported PHY mode %s !\n", + phy_modes(phy_intf)); + } phydev = phy_find_first(priv->mii_bus); if (!phydev) { @@ -1056,7 +1085,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) } phydev = phy_connect(netdev, phydev_name(phydev), - &ftgmac100_adjust_link, intf); + &ftgmac100_adjust_link, phy_intf); if (IS_ERR(phydev)) { netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name); @@ -1601,8 +1630,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); struct platform_device *pdev = to_platform_device(priv->dev); - phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII; struct device_node *np = pdev->dev.of_node; + struct device_node *mdio_np; int i, err = 0; u32 reg; @@ -1623,39 +1652,6 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); } - /* Get PHY mode from device-tree */ - if (np) { - /* Default to RGMII. It's a gigabit part after all */ - err = of_get_phy_mode(np, &phy_intf); - if (err) - phy_intf = PHY_INTERFACE_MODE_RGMII; - - /* Aspeed only supports these. I don't know about other IP - * block vendors so I'm going to just let them through for - * now. Note that this is only a warning if for some obscure - * reason the DT really means to lie about it or it's a newer - * part we don't know about. - * - * On the Aspeed SoC there are additionally straps and SCU - * control bits that could tell us what the interface is - * (or allow us to configure it while the IP block is held - * in reset). For now I chose to keep this driver away from - * those SoC specific bits and assume the device-tree is - * right and the SCU has been configured properly by pinmux - * or the firmware. 
- */ - if (priv->is_aspeed && - phy_intf != PHY_INTERFACE_MODE_RMII && - phy_intf != PHY_INTERFACE_MODE_RGMII && - phy_intf != PHY_INTERFACE_MODE_RGMII_ID && - phy_intf != PHY_INTERFACE_MODE_RGMII_RXID && - phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) { - netdev_warn(netdev, - "Unsupported PHY mode %s !\n", - phy_modes(phy_intf)); - } - } - priv->mii_bus->name = "ftgmac100_mdio"; snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); @@ -1667,35 +1663,38 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) for (i = 0; i < PHY_MAX_ADDR; i++) priv->mii_bus->irq[i] = PHY_POLL; - err = mdiobus_register(priv->mii_bus); + mdio_np = of_get_child_by_name(np, "mdio"); + + err = of_mdiobus_register(priv->mii_bus, mdio_np); if (err) { dev_err(priv->dev, "Cannot register MDIO bus!\n"); goto err_register_mdiobus; } - err = ftgmac100_mii_probe(priv, phy_intf); - if (err) { - dev_err(priv->dev, "MII Probe failed!\n"); - goto err_mii_probe; - } + of_node_put(mdio_np); return 0; -err_mii_probe: - mdiobus_unregister(priv->mii_bus); err_register_mdiobus: mdiobus_free(priv->mii_bus); return err; } +static void ftgmac100_phy_disconnect(struct net_device *netdev) +{ + if (!netdev->phydev) + return; + + phy_disconnect(netdev->phydev); +} + static void ftgmac100_destroy_mdio(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); - if (!netdev->phydev) + if (!priv->mii_bus) return; - phy_disconnect(netdev->phydev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); } @@ -1830,22 +1829,33 @@ static int ftgmac100_probe(struct platform_device *pdev) if (np && of_get_property(np, "use-ncsi", NULL)) { if (!IS_ENABLED(CONFIG_NET_NCSI)) { dev_err(&pdev->dev, "NCSI stack not enabled\n"); - goto err_ncsi_dev; + goto err_phy_connect; } dev_info(&pdev->dev, "Using NCSI interface\n"); priv->use_ncsi = true; priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); if (!priv->ndev) - goto err_ncsi_dev; + goto err_phy_connect; } else if (np && of_get_property(np, "phy-handle", NULL)) { struct phy_device *phy; + /* Support "mdio"/"phy" child nodes for ast2400/2500 with + * an embedded MDIO controller. Automatically scan the DTS for + * available PHYs and register them. 
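The replacement code registers the bus against an optional "mdio" child node instead of hard-coding mdiobus_register(). A sketch of that lookup pattern, relying on of_mdiobus_register() degrading to plain registration when handed a NULL node, and on of_get_child_by_name() returning a counted reference that must be dropped either way:

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Register an MDIO bus from an optional device-tree child node. */
static int my_register_mdio(struct mii_bus *bus, struct device_node *np)
{
	struct device_node *mdio_np = of_get_child_by_name(np, "mdio");
	int err;

	err = of_mdiobus_register(bus, mdio_np);
	of_node_put(mdio_np);	/* safe even when mdio_np is NULL */
	return err;
}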
+ */ + if (of_device_is_compatible(np, "aspeed,ast2400-mac") || + of_device_is_compatible(np, "aspeed,ast2500-mac")) { + err = ftgmac100_setup_mdio(netdev); + if (err) + goto err_setup_mdio; + } + phy = of_phy_get_and_connect(priv->netdev, np, &ftgmac100_adjust_link); if (!phy) { dev_err(&pdev->dev, "Failed to connect to phy\n"); - goto err_setup_mdio; + goto err_phy_connect; } /* Indicate that we support PAUSE frames (see comment in @@ -1865,12 +1875,19 @@ static int ftgmac100_probe(struct platform_device *pdev) err = ftgmac100_setup_mdio(netdev); if (err) goto err_setup_mdio; + + err = ftgmac100_mii_probe(netdev); + if (err) { + dev_err(priv->dev, "MII probe failed!\n"); + goto err_ncsi_dev; + } + } if (priv->is_aspeed) { err = ftgmac100_setup_clk(priv); if (err) - goto err_ncsi_dev; + goto err_phy_connect; } /* Default ring sizes */ @@ -1906,6 +1923,8 @@ static int ftgmac100_probe(struct platform_device *pdev) err_register_netdev: clk_disable_unprepare(priv->rclk); clk_disable_unprepare(priv->clk); +err_phy_connect: + ftgmac100_phy_disconnect(netdev); err_ncsi_dev: if (priv->ndev) ncsi_unregister_dev(priv->ndev); @@ -1940,6 +1959,7 @@ static int ftgmac100_remove(struct platform_device *pdev) */ cancel_work_sync(&priv->reset_task); + ftgmac100_phy_disconnect(netdev); ftgmac100_destroy_mdio(netdev); iounmap(priv->base); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index cb7c028b1bf5..4360ce4d3fb6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -53,6 +53,8 @@ #include <linux/dma-mapping.h> #include <linux/sort.h> #include <linux/phy_fixed.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <soc/fsl/bman.h> #include <soc/fsl/qman.h> #include "fman.h" @@ -177,7 +179,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); #define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \ + DPAA_HASH_RESULTS_SIZE) #define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \ - dpaa_rx_extra_headroom) + XDP_PACKET_HEADROOM - DPAA_HWA_SIZE) #ifdef CONFIG_DPAA_ERRATUM_A050385 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE) #define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? 
\ @@ -1128,6 +1130,25 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable) dpaa_fq->fqid = qman_fq_fqid(fq); + if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || + dpaa_fq->fq_type == FQ_TYPE_RX_PCD) { + err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev, + dpaa_fq->fqid, 0); + if (err) { + dev_err(dev, "xdp_rxq_info_reg() = %d\n", err); + return err; + } + + err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) { + dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n", + err); + xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); + return err; + } + } + return 0; } @@ -1157,6 +1178,11 @@ static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq) } } + if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT || + dpaa_fq->fq_type == FQ_TYPE_RX_PCD) && + xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq)) + xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq); + qman_destroy_fq(fq); list_del(&dpaa_fq->list); @@ -1599,17 +1625,13 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) { struct dpaa_bp *dpaa_bp; int *countptr; - int res; dpaa_bp = priv->dpaa_bp; if (!dpaa_bp) return -EINVAL; countptr = this_cpu_ptr(dpaa_bp->percpu_count); - res = dpaa_eth_refill_bpool(dpaa_bp, countptr); - if (res) - return res; - return 0; + return dpaa_eth_refill_bpool(dpaa_bp, countptr); } /* Cleanup function for outgoing frame descriptors that were built on Tx path, @@ -1623,6 +1645,9 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) * * Return the skb backpointer, since for S/G frames the buffer containing it * gets freed here. + * + * No skb backpointer is set when transmitting XDP frames. Cleanup the buffer + * and return NULL in this case. */ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, const struct qm_fd *fd, bool ts) @@ -1633,6 +1658,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, dma_addr_t addr = qm_fd_addr(fd); void *vaddr = phys_to_virt(addr); const struct qm_sg_entry *sgt; + struct dpaa_eth_swbp *swbp; struct sk_buff *skb; u64 ns; int i; @@ -1661,11 +1687,20 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, } } else { dma_unmap_single(priv->tx_dma_dev, addr, - priv->tx_headroom + qm_fd_get_length(fd), + qm_fd_get_offset(fd) + qm_fd_get_length(fd), dma_dir); } - skb = *(struct sk_buff **)vaddr; + swbp = (struct dpaa_eth_swbp *)vaddr; + skb = swbp->skb; + + /* No skb backpointer is set when running XDP. An xdp_frame + * backpointer is saved instead. + */ + if (!skb) { + xdp_return_frame(swbp->xdpf); + return NULL; + } /* DMA unmapping is required before accessing the HW provided info */ if (ts && priv->tx_tstamp && @@ -1731,7 +1766,6 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) goto free_buffer; - WARN_ON(fd_off != priv->rx_headroom); skb_reserve(skb, fd_off); skb_put(skb, qm_fd_get_length(fd)); @@ -1879,8 +1913,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv, { struct net_device *net_dev = priv->net_dev; enum dma_data_direction dma_dir; + struct dpaa_eth_swbp *swbp; unsigned char *buff_start; - struct sk_buff **skbh; dma_addr_t addr; int err; @@ -1891,8 +1925,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv, buff_start = skb->data - priv->tx_headroom; dma_dir = DMA_TO_DEVICE; - skbh = (struct sk_buff **)buff_start; - *skbh = skb; + swbp = (struct dpaa_eth_swbp *)buff_start; + swbp->skb = skb; /* Enable L3/L4 hardware checksum computation. 
* @@ -1931,8 +1965,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, const enum dma_data_direction dma_dir = DMA_TO_DEVICE; const int nr_frags = skb_shinfo(skb)->nr_frags; struct net_device *net_dev = priv->net_dev; + struct dpaa_eth_swbp *swbp; struct qm_sg_entry *sgt; - struct sk_buff **skbh; void *buff_start; skb_frag_t *frag; dma_addr_t addr; @@ -2005,8 +2039,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, qm_fd_set_sg(fd, priv->tx_headroom, skb->len); /* DMA map the SGT page */ - skbh = (struct sk_buff **)buff_start; - *skbh = skb; + swbp = (struct dpaa_eth_swbp *)buff_start; + swbp->skb = skb; addr = dma_map_page(priv->tx_dma_dev, p, 0, priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); @@ -2067,7 +2101,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv, } #ifdef CONFIG_DPAA_ERRATUM_A050385 -static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s) +static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s) { struct dpaa_priv *priv = netdev_priv(net_dev); struct sk_buff *new_skb, *skb = *s; @@ -2141,6 +2175,52 @@ workaround: return 0; } + +static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, + struct xdp_frame **init_xdpf) +{ + struct xdp_frame *new_xdpf, *xdpf = *init_xdpf; + void *new_buff; + struct page *p; + + /* Check the data alignment and make sure the headroom is large + * enough to store the xdpf backpointer. Use an aligned headroom + * value. + * + * Due to alignment constraints, we give XDP access to the full 256 + * byte frame headroom. If the XDP program uses all of it, copy the + * data to a new buffer and make room for storing the backpointer. + */ + if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) && + xdpf->headroom >= priv->tx_headroom) { + xdpf->headroom = priv->tx_headroom; + return 0; + } + + p = dev_alloc_pages(0); + if (unlikely(!p)) + return -ENOMEM; + + /* Copy the data to the new buffer at a properly aligned offset */ + new_buff = page_address(p); + memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len); + + /* Create an XDP frame around the new buffer in a similar fashion + * to xdp_convert_buff_to_frame. 
+ */ + new_xdpf = new_buff; + new_xdpf->data = new_buff + priv->tx_headroom; + new_xdpf->len = xdpf->len; + new_xdpf->headroom = priv->tx_headroom; + new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; + new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; + + /* Release the initial buffer */ + xdp_return_frame_rx_napi(xdpf); + + *init_xdpf = new_xdpf; + return 0; +} #endif static netdev_tx_t @@ -2191,7 +2271,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) #ifdef CONFIG_DPAA_ERRATUM_A050385 if (unlikely(fman_has_errata_a050385())) { - if (dpaa_a050385_wa(net_dev, &skb)) + if (dpaa_a050385_wa_skb(net_dev, &skb)) goto enomem; nonlinear = skb_is_nonlinear(skb); } @@ -2275,8 +2355,11 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) { struct dpaa_napi_portal *np = container_of(napi, struct dpaa_napi_portal, napi); + int cleaned; + + np->xdp_act = 0; - int cleaned = qman_p_poll_dqrr(np->p, budget); + cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { napi_complete_done(napi, cleaned); @@ -2285,6 +2368,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget) qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } + if (np->xdp_act & XDP_REDIRECT) + xdp_do_flush(); + return cleaned; } @@ -2313,9 +2399,9 @@ static void dpaa_tx_conf(struct net_device *net_dev, } static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, - struct qman_portal *portal) + struct qman_portal *portal, bool sched_napi) { - if (unlikely(in_irq() || !in_serving_softirq())) { + if (sched_napi) { /* Disable QMan IRQ and invoke NAPI */ qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); @@ -2329,7 +2415,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, struct qman_fq *fq, - const struct qm_dqrr_entry *dq) + const struct qm_dqrr_entry *dq, + bool sched_napi) { struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); struct dpaa_percpu_priv *percpu_priv; @@ -2345,7 +2432,7 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, percpu_priv = this_cpu_ptr(priv->percpu_priv); - if (dpaa_eth_napi_schedule(percpu_priv, portal)) + if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) return qman_cb_dqrr_stop; dpaa_eth_refill_bpools(priv); @@ -2354,29 +2441,208 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, return qman_cb_dqrr_consume; } +static int dpaa_xdp_xmit_frame(struct net_device *net_dev, + struct xdp_frame *xdpf) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct rtnl_link_stats64 *percpu_stats; + struct dpaa_percpu_priv *percpu_priv; + struct dpaa_eth_swbp *swbp; + struct netdev_queue *txq; + void *buff_start; + struct qm_fd fd; + dma_addr_t addr; + int err; + + percpu_priv = this_cpu_ptr(priv->percpu_priv); + percpu_stats = &percpu_priv->stats; + +#ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) { + if (dpaa_a050385_wa_xdpf(priv, &xdpf)) { + err = -ENOMEM; + goto out_error; + } + } +#endif + + if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) { + err = -EINVAL; + goto out_error; + } + + buff_start = xdpf->data - xdpf->headroom; + + /* Leave empty the skb backpointer at the start of the buffer. + * Save the XDP frame for easy cleanup on confirmation. 
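dpaa_eth_poll() now clears np->xdp_act at the start of each poll, lets the Rx callbacks OR in the verdicts they saw, and issues a single xdp_do_flush() when any frame was redirected. A compact sketch of that once-per-poll flush pattern; the context struct and callbacks are illustrative stand-ins:

#define XDP_REDIRECT_BIT 0x4            /* stand-in for XDP_REDIRECT */

struct napi_ctx {
        int xdp_act;                    /* OR of verdicts seen this poll */
};

static void flush_redirects(void)
{
        /* xdp_do_flush() in the kernel: kicks the redirect maps once */
}

static int poll_once(struct napi_ctx *np, int budget, int (*process_frame)(void))
{
        int cleaned;

        np->xdp_act = 0;
        for (cleaned = 0; cleaned < budget; cleaned++)
                np->xdp_act |= process_frame();

        /* One flush covers every frame redirected during this poll. */
        if (np->xdp_act & XDP_REDIRECT_BIT)
                flush_redirects();

        return cleaned;
}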
+ */ + swbp = (struct dpaa_eth_swbp *)buff_start; + swbp->skb = NULL; + swbp->xdpf = xdpf; + + qm_fd_clear_fd(&fd); + fd.bpid = FSL_DPAA_BPID_INV; + fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO); + qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len); + + addr = dma_map_single(priv->tx_dma_dev, buff_start, + xdpf->headroom + xdpf->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { + err = -EINVAL; + goto out_error; + } + + qm_fd_addr_set64(&fd, addr); + + /* Bump the trans_start */ + txq = netdev_get_tx_queue(net_dev, smp_processor_id()); + txq->trans_start = jiffies; + + err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd); + if (err) { + dma_unmap_single(priv->tx_dma_dev, addr, + qm_fd_get_offset(&fd) + qm_fd_get_length(&fd), + DMA_TO_DEVICE); + goto out_error; + } + + return 0; + +out_error: + percpu_stats->tx_errors++; + return err; +} + +static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr, + struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len) +{ + ssize_t fd_off = qm_fd_get_offset(fd); + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + struct xdp_buff xdp; + u32 xdp_act; + int err; + + rcu_read_lock(); + + xdp_prog = READ_ONCE(priv->xdp_prog); + if (!xdp_prog) { + rcu_read_unlock(); + return XDP_PASS; + } + + xdp.data = vaddr + fd_off; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + qm_fd_get_length(fd); + xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE; + xdp.rxq = &dpaa_fq->xdp_rxq; + + /* We reserve a fixed headroom of 256 bytes under the erratum and we + * offer it all to XDP programs to use. If no room is left for the + * xdpf backpointer on TX, we will need to copy the data. + * Disable metadata support since data realignments might be required + * and the information can be lost. + */ +#ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) { + xdp_set_data_meta_invalid(&xdp); + xdp.data_hard_start = vaddr; + xdp.frame_sz = DPAA_BP_RAW_SIZE; + } +#endif + + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); + + /* Update the length and the offset of the FD */ + qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data); + + switch (xdp_act) { + case XDP_PASS: +#ifdef CONFIG_DPAA_ERRATUM_A050385 + *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 
0 : + xdp.data - xdp.data_meta; +#else + *xdp_meta_len = xdp.data - xdp.data_meta; +#endif + break; + case XDP_TX: + /* We can access the full headroom when sending the frame + * back out + */ + xdp.data_hard_start = vaddr; + xdp.frame_sz = DPAA_BP_RAW_SIZE; + xdpf = xdp_convert_buff_to_frame(&xdp); + if (unlikely(!xdpf)) { + free_pages((unsigned long)vaddr, 0); + break; + } + + if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf)) + xdp_return_frame_rx_napi(xdpf); + + break; + case XDP_REDIRECT: + /* Allow redirect to use the full headroom */ + xdp.data_hard_start = vaddr; + xdp.frame_sz = DPAA_BP_RAW_SIZE; + + err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); + if (err) { + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + free_pages((unsigned long)vaddr, 0); + } + break; + default: + bpf_warn_invalid_xdp_action(xdp_act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + fallthrough; + case XDP_DROP: + /* Free the buffer */ + free_pages((unsigned long)vaddr, 0); + break; + } + + rcu_read_unlock(); + + return xdp_act; +} + static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, struct qman_fq *fq, - const struct qm_dqrr_entry *dq) + const struct qm_dqrr_entry *dq, + bool sched_napi) { + bool ts_valid = false, hash_valid = false; struct skb_shared_hwtstamps *shhwtstamps; + unsigned int skb_len, xdp_meta_len = 0; struct rtnl_link_stats64 *percpu_stats; struct dpaa_percpu_priv *percpu_priv; const struct qm_fd *fd = &dq->fd; dma_addr_t addr = qm_fd_addr(fd); + struct dpaa_napi_portal *np; enum qm_fd_format fd_format; struct net_device *net_dev; u32 fd_status, hash_offset; + struct qm_sg_entry *sgt; struct dpaa_bp *dpaa_bp; + struct dpaa_fq *dpaa_fq; struct dpaa_priv *priv; - unsigned int skb_len; struct sk_buff *skb; int *count_ptr; + u32 xdp_act; void *vaddr; + u32 hash; u64 ns; + np = container_of(&portal, struct dpaa_napi_portal, p); + dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); fd_status = be32_to_cpu(fd->status); fd_format = qm_fd_get_format(fd); - net_dev = ((struct dpaa_fq *)fq)->net_dev; + net_dev = dpaa_fq->net_dev; priv = netdev_priv(net_dev); dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); if (!dpaa_bp) @@ -2388,7 +2654,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, percpu_priv = this_cpu_ptr(priv->percpu_priv); percpu_stats = &percpu_priv->stats; - if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) + if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))) return qman_cb_dqrr_stop; /* Make sure we didn't run out of buffers */ @@ -2427,35 +2693,68 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); (*count_ptr)--; - if (likely(fd_format == qm_fd_contig)) + /* Extract the timestamp stored in the headroom before running XDP */ + if (priv->rx_tstamp) { + if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) + ts_valid = true; + else + WARN_ONCE(1, "fman_port_get_tstamp failed!\n"); + } + + /* Extract the hash stored in the headroom before running XDP */ + if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && + !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], + &hash_offset)) { + hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset)); + hash_valid = true; + } + + if (likely(fd_format == qm_fd_contig)) { + xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr, + dpaa_fq, &xdp_meta_len); + np->xdp_act |= xdp_act; + if (xdp_act != XDP_PASS) { + 
percpu_stats->rx_packets++; + percpu_stats->rx_bytes += qm_fd_get_length(fd); + return qman_cb_dqrr_consume; + } skb = contig_fd_to_skb(priv, fd); - else + } else { + /* XDP doesn't support S/G frames. Return the fragments to the + * buffer pool and release the SGT. + */ + if (READ_ONCE(priv->xdp_prog)) { + WARN_ONCE(1, "S/G frames not supported under XDP\n"); + sgt = vaddr + qm_fd_get_offset(fd); + dpaa_release_sgt_members(sgt); + free_pages((unsigned long)vaddr, 0); + return qman_cb_dqrr_consume; + } skb = sg_fd_to_skb(priv, fd); + } if (!skb) return qman_cb_dqrr_consume; - if (priv->rx_tstamp) { + if (xdp_meta_len) + skb_metadata_set(skb, xdp_meta_len); + + /* Set the previously extracted timestamp */ + if (ts_valid) { shhwtstamps = skb_hwtstamps(skb); memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - - if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns)) - shhwtstamps->hwtstamp = ns_to_ktime(ns); - else - dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n"); + shhwtstamps->hwtstamp = ns_to_ktime(ns); } skb->protocol = eth_type_trans(skb, net_dev); - if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && - !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], - &hash_offset)) { + /* Set the previously extracted hash */ + if (hash_valid) { enum pkt_hash_types type; /* if L4 exists, it was used in the hash generation */ type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; - skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)), - type); + skb_set_hash(skb, hash, type); } skb_len = skb->len; @@ -2473,7 +2772,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, struct qman_fq *fq, - const struct qm_dqrr_entry *dq) + const struct qm_dqrr_entry *dq, + bool sched_napi) { struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev; @@ -2484,7 +2784,7 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, percpu_priv = this_cpu_ptr(priv->percpu_priv); - if (dpaa_eth_napi_schedule(percpu_priv, portal)) + if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) return qman_cb_dqrr_stop; dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); @@ -2494,7 +2794,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, struct qman_fq *fq, - const struct qm_dqrr_entry *dq) + const struct qm_dqrr_entry *dq, + bool sched_napi) { struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev; @@ -2508,7 +2809,7 @@ static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, percpu_priv = this_cpu_ptr(priv->percpu_priv); - if (dpaa_eth_napi_schedule(percpu_priv, portal)) + if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)) return qman_cb_dqrr_stop; dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); @@ -2554,7 +2855,7 @@ static void dpaa_eth_napi_enable(struct dpaa_priv *priv) for_each_online_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - percpu_priv->np.down = 0; + percpu_priv->np.down = false; napi_enable(&percpu_priv->np.napi); } } @@ -2567,7 +2868,7 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv) for_each_online_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - percpu_priv->np.down = 1; + percpu_priv->np.down = true; napi_disable(&percpu_priv->np.napi); } } @@ -2673,6 +2974,101 @@ static int dpaa_eth_stop(struct 
net_device *net_dev) return err; } +static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu) +{ + int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom; + + /* We do not support S/G fragments when XDP is enabled. + * Limit the MTU in relation to the buffer size. + */ + if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) { + dev_warn(priv->net_dev->dev.parent, + "The maximum MTU for XDP is %d\n", + max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN); + return false; + } + + return true; +} + +static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + + if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) + return -EINVAL; + + net_dev->mtu = new_mtu; + return 0; +} + +static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf) +{ + struct dpaa_priv *priv = netdev_priv(net_dev); + struct bpf_prog *old_prog; + int err; + bool up; + + /* S/G fragments are not supported in XDP-mode */ + if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) { + NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP"); + return -EINVAL; + } + + up = netif_running(net_dev); + + if (up) + dpaa_eth_stop(net_dev); + + old_prog = xchg(&priv->xdp_prog, bpf->prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (up) { + err = dpaa_open(net_dev); + if (err) { + NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed"); + return err; + } + } + + return 0; +} + +static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return dpaa_setup_xdp(net_dev, xdp); + default: + return -EINVAL; + } +} + +static int dpaa_xdp_xmit(struct net_device *net_dev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct xdp_frame *xdpf; + int i, err, drops = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!netif_running(net_dev)) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + xdpf = frames[i]; + err = dpaa_xdp_xmit_frame(net_dev, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + return n - drops; +} + static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct dpaa_priv *priv = netdev_priv(dev); @@ -2739,6 +3135,9 @@ static const struct net_device_ops dpaa_ops = { .ndo_set_rx_mode = dpaa_set_rx_mode, .ndo_do_ioctl = dpaa_ioctl, .ndo_setup_tc = dpaa_setup_tc, + .ndo_change_mtu = dpaa_change_mtu, + .ndo_bpf = dpaa_xdp, + .ndo_xdp_xmit = dpaa_xdp_xmit, }; static int dpaa_napi_add(struct net_device *net_dev) @@ -2870,10 +3269,16 @@ static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl, */ headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE); - if (port == RX) + if (port == RX) { +#ifdef CONFIG_DPAA_ERRATUM_A050385 + if (unlikely(fman_has_errata_a050385())) + headroom = XDP_PACKET_HEADROOM; +#endif + return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT); - else + } else { return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); + } } static int dpaa_eth_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index fc2cc4c48e06..daf894a97050 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -68,6 +68,7 @@ struct dpaa_fq { u16 channel; u8 wq; enum dpaa_fq_type fq_type; + struct xdp_rxq_info xdp_rxq; }; struct dpaa_fq_cbs { @@ -126,6 +127,7 @@ struct dpaa_napi_portal { struct napi_struct napi; struct qman_portal *p; bool down; + int xdp_act; }; struct 
dpaa_percpu_priv { @@ -144,6 +146,15 @@ struct dpaa_buffer_layout { u16 priv_data_size; }; +/* Information to be used on the Tx confirmation path. Stored just + * before the start of the transmit buffer. Maximum size allowed + * is DPAA_TX_PRIV_DATA_SIZE bytes. + */ +struct dpaa_eth_swbp { + struct sk_buff *skb; + struct xdp_frame *xdpf; +}; + struct dpaa_priv { struct dpaa_percpu_priv __percpu *percpu_priv; struct dpaa_bp *dpaa_bp; @@ -188,6 +199,8 @@ struct dpaa_priv { bool tx_tstamp; /* Tx timestamping enabled */ bool rx_tstamp; /* Rx timestamping enabled */ + + struct bpf_prog *xdp_prog; }; /* from dpaa_ethtool.c */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index cf9400a9886d..91cff93dbdae 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -686,7 +686,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv, if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp, &offset1, &offset2) || - msgtype != 0 || twostep) { + msgtype != PTP_MSGTYPE_SYNC || twostep) { WARN_ONCE(1, "Bad packet for one-step timestamping\n"); return; } @@ -1212,7 +1212,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) { if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp, &offset1, &offset2)) - if (msgtype == 0 && twostep == 0) { + if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) { skb_queue_tail(&priv->tx_skbs, skb); queue_work(priv->dpaa2_ptp_wq, &priv->tx_onestep_tstamp); @@ -3334,7 +3334,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, return 0; err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, - fq->flowid); + fq->flowid, 0); if (err) { dev_err(dev, "xdp_rxq_info_reg failed\n"); return err; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index fc2075ea57fe..c78d12229730 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -50,40 +50,6 @@ drop_packet_err: return NETDEV_TX_OK; } -static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd) -{ - int l3_start, l3_hsize; - u16 l3_flags, l4_flags; - - if (skb->ip_summed != CHECKSUM_PARTIAL) - return false; - - switch (skb->csum_offset) { - case offsetof(struct tcphdr, check): - l4_flags = ENETC_TXBD_L4_TCP; - break; - case offsetof(struct udphdr, check): - l4_flags = ENETC_TXBD_L4_UDP; - break; - default: - skb_checksum_help(skb); - return false; - } - - l3_start = skb_network_offset(skb); - l3_hsize = skb_network_header_len(skb); - - l3_flags = 0; - if (skb->protocol == htons(ETH_P_IPV6)) - l3_flags = ENETC_TXBD_L3_IPV6; - - /* write BD fields */ - txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags); - txbd->l4_csoff = l4_flags; - - return true; -} - static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, struct enetc_tx_swbd *tx_swbd) { @@ -149,22 +115,16 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, if (do_vlan || do_tstamp) flags |= ENETC_TXBD_FLAGS_EX; - if (enetc_tx_csum(skb, &temp_bd)) - flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS; - else if (tx_ring->tsd_enable) + if (tx_ring->tsd_enable) flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART; /* first BD needs frm_len and offload flags set */ temp_bd.frm_len = cpu_to_le16(skb->len); temp_bd.flags = flags; - if 
(flags & ENETC_TXBD_FLAGS_TSE) { - u32 temp; - - temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK) - | (flags << ENETC_TXBD_FLAGS_OFFSET); - temp_bd.txstart = cpu_to_le32(temp); - } + if (flags & ENETC_TXBD_FLAGS_TSE) + temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns, + flags); if (flags & ENETC_TXBD_FLAGS_EX) { u8 e_flags = 0; @@ -1925,8 +1885,7 @@ static void enetc_kfree_si(struct enetc_si *si) static void enetc_detect_errata(struct enetc_si *si) { if (si->pdev->revision == ENETC_REV1) - si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL | - ENETC_ERR_UCMCSWP; + si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP; } int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index dd0fb0c066d7..8532d23b54f5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -147,9 +147,8 @@ struct enetc_msg_swbd { #define ENETC_REV1 0x1 enum enetc_errata { - ENETC_ERR_TXCSUM = BIT(0), - ENETC_ERR_VLAN_ISOL = BIT(1), - ENETC_ERR_UCMCSWP = BIT(2), + ENETC_ERR_VLAN_ISOL = BIT(0), + ENETC_ERR_UCMCSWP = BIT(1), }; #define ENETC_SI_F_QBV BIT(0) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 4cbf1667d7ff..e1e950d48c92 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -481,8 +481,7 @@ union enetc_tx_bd { __le16 frm_len; union { struct { - __le16 l3_csoff; - u8 l4_csoff; + u8 reserved[3]; u8 flags; }; /* default layout */ __le32 txstart; @@ -505,41 +504,37 @@ union enetc_tx_bd { } wb; /* writeback descriptor */ }; -#define ENETC_TXBD_FLAGS_L4CS BIT(0) -#define ENETC_TXBD_FLAGS_TSE BIT(1) -#define ENETC_TXBD_FLAGS_W BIT(2) -#define ENETC_TXBD_FLAGS_CSUM BIT(3) -#define ENETC_TXBD_FLAGS_TXSTART BIT(4) -#define ENETC_TXBD_FLAGS_EX BIT(6) -#define ENETC_TXBD_FLAGS_F BIT(7) +enum enetc_txbd_flags { + ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */ + ENETC_TXBD_FLAGS_TSE = BIT(1), + ENETC_TXBD_FLAGS_W = BIT(2), + ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */ + ENETC_TXBD_FLAGS_TXSTART = BIT(4), + ENETC_TXBD_FLAGS_EX = BIT(6), + ENETC_TXBD_FLAGS_F = BIT(7) +}; #define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0) #define ENETC_TXBD_FLAGS_OFFSET 24 + +static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags) +{ + u32 temp; + + temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) | + (flags << ENETC_TXBD_FLAGS_OFFSET); + + return cpu_to_le32(temp); +} + static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) { memset(txbd, 0, sizeof(*txbd)); } -/* L3 csum flags */ -#define ENETC_TXBD_L3_IPCS BIT(7) -#define ENETC_TXBD_L3_IPV6 BIT(15) - -#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0) -#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8) - /* Extension flags */ #define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) #define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) -static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags) -{ - return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) | - (start & ENETC_TXBD_L3_START_MASK)); -} - -/* L4 csum flags */ -#define ENETC_TXBD_L4_UDP BIT(5) -#define ENETC_TXBD_L4_TCP BIT(6) - union enetc_rx_bd { struct { __le64 addr; @@ -584,10 +579,10 @@ struct enetc_cmd_rfse { u8 smac_m[6]; u8 dmac_h[6]; u8 dmac_m[6]; - u32 sip_h[4]; - u32 sip_m[4]; - u32 dip_h[4]; - u32 dip_m[4]; + __be32 sip_h[4]; + __be32 sip_m[4]; + __be32 
dip_h[4]; + __be32 dip_m[4]; u16 ethtype_h; u16 ethtype_m; u16 ethtype4_h; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 419306342ac5..ed8fcb8b486e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -714,22 +714,16 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->watchdog_timeo = 5 * HZ; ndev->max_mtu = ENETC_MAX_MTU; - ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK; - ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | - NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; - if (si->errata & ENETC_ERR_TXCSUM) { - ndev->hw_features &= ~NETIF_F_HW_CSUM; - ndev->features &= ~NETIF_F_HW_CSUM; - } - ndev->priv_flags |= IFF_UNICAST_FLT; if (si->hw_features & ENETC_SI_F_QBV) @@ -857,13 +851,12 @@ static bool enetc_port_has_pcs(struct enetc_pf *pf) pf->if_mode == PHY_INTERFACE_MODE_USXGMII); } -static int enetc_mdiobus_create(struct enetc_pf *pf) +static int enetc_mdiobus_create(struct enetc_pf *pf, struct device_node *node) { - struct device *dev = &pf->si->pdev->dev; struct device_node *mdio_np; int err; - mdio_np = of_get_child_by_name(dev->of_node, "mdio"); + mdio_np = of_get_child_by_name(node, "mdio"); if (mdio_np) { err = enetc_mdio_probe(pf, mdio_np); @@ -975,18 +968,17 @@ static const struct phylink_mac_ops enetc_mac_phylink_ops = { .mac_link_down = enetc_pl_mac_link_down, }; -static int enetc_phylink_create(struct enetc_ndev_priv *priv) +static int enetc_phylink_create(struct enetc_ndev_priv *priv, + struct device_node *node) { struct enetc_pf *pf = enetc_si_priv(priv->si); - struct device *dev = &pf->si->pdev->dev; struct phylink *phylink; int err; pf->phylink_config.dev = &priv->ndev->dev; pf->phylink_config.type = PHYLINK_NETDEV; - phylink = phylink_create(&pf->phylink_config, - of_fwnode_handle(dev->of_node), + phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node), pf->if_mode, &enetc_mac_phylink_ops); if (IS_ERR(phylink)) { err = PTR_ERR(phylink); @@ -1007,13 +999,14 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv) static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct device_node *node = pdev->dev.of_node; struct enetc_ndev_priv *priv; struct net_device *ndev; struct enetc_si *si; struct enetc_pf *pf; int err; - if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + if (node && !of_device_is_available(node)) { dev_info(&pdev->dev, "device is disabled, skipping\n"); return -ENODEV; } @@ -1064,12 +1057,12 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_msix; } - if (!of_get_phy_mode(pdev->dev.of_node, &pf->if_mode)) { - err = enetc_mdiobus_create(pf); + if (!of_get_phy_mode(node, &pf->if_mode)) { + err = enetc_mdiobus_create(pf, node); if (err) goto err_mdiobus_create; - err = enetc_phylink_create(priv); + err = enetc_phylink_create(priv, node); if (err) goto err_phylink_create; } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index dbceb99c4441..a9aee219fb58 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ 
b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -118,8 +118,8 @@ static int enetc_setup_taprio(struct net_device *ndev, return -ENOMEM; } - cbd.addr[0] = lower_32_bits(dma); - cbd.addr[1] = upper_32_bits(dma); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); cbd.cls = BDCR_CMD_PORT_GCL; cbd.status_flags = 0; @@ -496,16 +496,15 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, return -ENOMEM; } - cbd.addr[0] = lower_32_bits(dma); - cbd.addr[1] = upper_32_bits(dma); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); eth_broadcast_addr(si_data->dmac); - si_data->vid_vidm_tg = - cpu_to_le16(ENETC_CBDR_SID_VID_MASK - + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); + si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK + + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); si_conf = &cbd.sid_set; /* Only one port supported for one entry, set itself */ - si_conf->iports = 1 << enetc_get_port(priv); + si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); si_conf->id_type = 1; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -530,7 +529,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, si_conf->en = 0x80; si_conf->stream_handle = cpu_to_le32(sid->handle); - si_conf->iports = 1 << enetc_get_port(priv); + si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); si_conf->id_type = sid->filtertype; si_conf->oui[2] = 0x0; si_conf->oui[1] = 0x80; @@ -540,8 +539,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, cbd.length = cpu_to_le16(data_size); - cbd.addr[0] = lower_32_bits(dma); - cbd.addr[1] = upper_32_bits(dma); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); /* VIDM default to be 1. * VID Match. If set (b1) then the VID must match, otherwise @@ -550,16 +549,14 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, */ if (si_conf->id_type == STREAMID_TYPE_NULL) { ether_addr_copy(si_data->dmac, sid->dst_mac); - si_data->vid_vidm_tg = - cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + - ((((u16)(sid->tagged) & 0x3) << 14) - | ENETC_CBDR_SID_VIDM)); + si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + + ((((u16)(sid->tagged) & 0x3) << 14) + | ENETC_CBDR_SID_VIDM); } else if (si_conf->id_type == STREAMID_TYPE_SMAC) { ether_addr_copy(si_data->smac, sid->src_mac); - si_data->vid_vidm_tg = - cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) + - ((((u16)(sid->tagged) & 0x3) << 14) - | ENETC_CBDR_SID_VIDM)); + si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) + + ((((u16)(sid->tagged) & 0x3) << 14) + | ENETC_CBDR_SID_VIDM); } err = enetc_send_cmd(priv->si, &cbd); @@ -594,7 +591,7 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, } sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id); - sfi_config->input_ports = 1 << enetc_get_port(priv); + sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv)); /* The priority value which may be matched against the * frame’s priority value to determine a match for this entry. 
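A recurring fix in the enetc_qos.c hunks is converting control BD fields to the little-endian layout the hardware reads, most often when splitting a 64-bit DMA address across the two addr words. A sketch of that split under stdint stand-ins for the kernel helpers:

#include <stdint.h>

/* Stand-in for cpu_to_le32(): identity on little-endian hosts, a
 * byte swap on big-endian ones.
 */
static inline uint32_t to_le32(uint32_t v) { return v; }

struct cbd_sketch {
        uint32_t addr[2];               /* low word first, per the BD layout */
};

static void cbd_set_addr(struct cbd_sketch *cbd, uint64_t dma)
{
        cbd->addr[0] = to_le32((uint32_t)dma);          /* lower_32_bits() */
        cbd->addr[1] = to_le32((uint32_t)(dma >> 32));  /* upper_32_bits() */
}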
@@ -648,8 +645,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, err = -ENOMEM; goto exit; } - cbd.addr[0] = lower_32_bits(dma); - cbd.addr[1] = upper_32_bits(dma); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); cbd.length = cpu_to_le16(data_size); @@ -657,28 +654,25 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv, if (err) goto exit; - cnt->matching_frames_count = - ((u64)le32_to_cpu(data_buf->matchh) << 32) - + data_buf->matchl; + cnt->matching_frames_count = ((u64)data_buf->matchh << 32) + + data_buf->matchl; - cnt->not_passing_sdu_count = - ((u64)le32_to_cpu(data_buf->msdu_droph) << 32) - + data_buf->msdu_dropl; + cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) + + data_buf->msdu_dropl; cnt->passing_sdu_count = cnt->matching_frames_count - cnt->not_passing_sdu_count; cnt->not_passing_frames_count = - ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32) - + le32_to_cpu(data_buf->stream_gate_dropl); + ((u64)data_buf->stream_gate_droph << 32) + + data_buf->stream_gate_dropl; - cnt->passing_frames_count = cnt->matching_frames_count - - cnt->not_passing_sdu_count - - cnt->not_passing_frames_count; + cnt->passing_frames_count = cnt->matching_frames_count - + cnt->not_passing_sdu_count - + cnt->not_passing_frames_count; - cnt->red_frames_count = - ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32) - + le32_to_cpu(data_buf->flow_meter_dropl); + cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) + + data_buf->flow_meter_dropl; exit: kfree(data_buf); @@ -785,15 +779,15 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, return -ENOMEM; } - cbd.addr[0] = lower_32_bits(dma); - cbd.addr[1] = upper_32_bits(dma); + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma)); sgce = &sgcl_data->sgcl[0]; sgcl_config->agtst = 0x80; - sgcl_data->ct = cpu_to_le32(sgi->cycletime); - sgcl_data->cte = cpu_to_le32(sgi->cycletimext); + sgcl_data->ct = sgi->cycletime; + sgcl_data->cte = sgi->cycletimext; if (sgi->init_ipv >= 0) sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8; @@ -815,7 +809,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, to->msdu[2] = (from->maxoctets >> 16) & 0xFF; } - to->interval = cpu_to_le32(from->interval); + to->interval = from->interval; } /* If basetime is less than now, calculate start time */ @@ -827,15 +821,15 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, err = get_start_ns(now, sgi->cycletime, &start); if (err) goto exit; - sgcl_data->btl = cpu_to_le32(lower_32_bits(start)); - sgcl_data->bth = cpu_to_le32(upper_32_bits(start)); + sgcl_data->btl = lower_32_bits(start); + sgcl_data->bth = upper_32_bits(start); } else { u32 hi, lo; hi = upper_32_bits(sgi->basetime); lo = lower_32_bits(sgi->basetime); - sgcl_data->bth = cpu_to_le32(hi); - sgcl_data->btl = cpu_to_le32(lo); + sgcl_data->bth = hi; + sgcl_data->btl = lo; } err = enetc_send_cmd(priv->si, &cbd); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 7b5c82c7e4e5..39c1a09e69a9 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -120,22 +120,16 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, ndev->watchdog_timeo = 5 * HZ; ndev->max_mtu = ENETC_MAX_MTU; - ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + ndev->hw_features = 
NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | - NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; if (si->num_rss) ndev->hw_features |= NETIF_F_RXHASH; - if (si->errata & ENETC_ERR_TXCSUM) { - ndev->hw_features &= ~NETIF_F_HW_CSUM; - ndev->features &= ~NETIF_F_HW_CSUM; - } - /* pick up primary MAC address from SI */ enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); } diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index c6481bd61239..9d58d8334467 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -430,7 +430,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) return -ENOMEM; priv = new_bus->priv; - new_bus->name = "Freescale PowerQUICC MII Bus", + new_bus->name = "Freescale PowerQUICC MII Bus"; new_bus->read = &fsl_pq_mdio_read; new_bus->write = &fsl_pq_mdio_write; new_bus->reset = &fsl_pq_mdio_reset; diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 3fe903972195..1a9bdf66a7d8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -882,7 +882,6 @@ struct ucc_geth_hardware_statistics { addresses */ #define TX_TIMEOUT (1*HZ) -#define SKB_ALLOC_TIMEOUT 100000 #define PHY_INIT_TIMEOUT 100000 #define PHY_CHANGE_TIME 2 diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index f5c80229ea96..daf07c0f790b 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -38,6 +38,8 @@ #define NIC_TX_STATS_REPORT_NUM 0 #define NIC_RX_STATS_REPORT_NUM 4 +#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1)) + /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */ struct gve_rx_desc_queue { struct gve_rx_desc *desc_ring; /* the descriptor ring */ @@ -49,7 +51,8 @@ struct gve_rx_desc_queue { struct gve_rx_slot_page_info { struct page *page; void *page_address; - u32 page_offset; /* offset to write to in page */ + u8 page_offset; /* flipped to second half? */ + u8 can_flip; }; /* A list of pages registered with the device during setup and used by a queue @@ -64,10 +67,11 @@ struct gve_queue_page_list { /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */ struct gve_rx_data_queue { - struct gve_rx_data_slot *data_ring; /* read by NIC */ + union gve_rx_data_slot *data_ring; /* read by NIC */ dma_addr_t data_bus; /* dma mapping of the slots */ struct gve_rx_slot_page_info *page_info; /* page info of the buffers */ struct gve_queue_page_list *qpl; /* qpl assigned to this queue */ + u8 raw_addressing; /* use raw_addressing? 
*/ }; struct gve_priv; @@ -82,6 +86,7 @@ struct gve_rx_ring { u32 cnt; /* free-running total number of completed packets */ u32 fill_cnt; /* free-running total number of descs and buffs posted */ u32 mask; /* masks the cnt and fill_cnt to the size of the ring */ + u32 db_threshold; /* threshold for posting new buffs and descs */ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */ u64 rx_copied_pkt; /* free-running total number of copied packets */ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */ @@ -107,12 +112,20 @@ struct gve_tx_iovec { u32 iov_padding; /* padding associated with this segment */ }; +struct gve_tx_dma_buf { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc * ring entry but only used for a pkt_desc not a seg_desc */ struct gve_tx_buffer_state { struct sk_buff *skb; /* skb for this pkt */ - struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ + union { + struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ + struct gve_tx_dma_buf buf; + }; }; /* A TX buffer - each queue has one */ @@ -135,13 +148,17 @@ struct gve_tx_ring { __be32 last_nic_done ____cacheline_aligned; /* NIC tail pointer */ u64 pkt_done; /* free-running - total packets completed */ u64 bytes_done; /* free-running - total bytes completed */ + u64 dropped_pkt; /* free-running - total packets dropped */ + u64 dma_mapping_error; /* count of dma mapping errors */ /* Cacheline 2 -- Read-mostly fields */ union gve_tx_desc *desc ____cacheline_aligned; struct gve_tx_buffer_state *info; /* Maps 1:1 to a desc */ struct netdev_queue *netdev_txq; struct gve_queue_resources *q_resources; /* head and tail pointer idx */ + struct device *dev; u32 mask; /* masks req and done down to queue size */ + u8 raw_addressing; /* use raw_addressing? */ /* Slow-path fields */ u32 q_num ____cacheline_aligned; /* queue idx */ @@ -194,11 +211,12 @@ struct gve_priv { u16 tx_desc_cnt; /* num desc per ring */ u16 rx_desc_cnt; /* num desc per ring */ u16 tx_pages_per_qpl; /* tx buffer length */ - u16 rx_pages_per_qpl; /* rx buffer length */ + u16 rx_data_slot_cnt; /* rx buffer length */ u64 max_registered_pages; u64 num_registered_pages; /* num pages registered with NIC */ u32 rx_copybreak; /* copy packets smaller than this */ u16 default_num_queues; /* default num queues to set up */ + u8 raw_addressing; /* 1 if this dev supports raw addressing, 0 otherwise */ struct gve_queue_config tx_cfg; struct gve_queue_config rx_cfg; @@ -436,14 +454,14 @@ static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx) */ static inline u32 gve_num_tx_qpls(struct gve_priv *priv) { - return priv->tx_cfg.num_queues; + return priv->raw_addressing ? 0 : priv->tx_cfg.num_queues; } /* Returns the number of rx queue page lists */ static inline u32 gve_num_rx_qpls(struct gve_priv *priv) { - return priv->rx_cfg.num_queues; + return priv->raw_addressing ? 0 : priv->rx_cfg.num_queues; } /* Returns a pointer to the next available tx qpl in the list of qpls @@ -497,15 +515,6 @@ static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, return DMA_FROM_DEVICE; } -/* Returns true if the max mtu allows page recycling */ -static inline bool gve_can_recycle_pages(struct net_device *dev) -{ - /* We can't recycle the pages if we can't fit a packet into half a - * page. 
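Under raw addressing the device accepts DMA addresses directly, so no queue page lists exist at all, and gve_num_tx_qpls()/gve_num_rx_qpls() collapsing to 0 makes every QPL alloc/free loop a no-op. A small sketch of the gating, with the config struct made up for illustration:

#include <stdbool.h>

struct gve_cfg_sketch {
        bool raw_addressing;            /* negotiated device option */
        unsigned int num_tx_queues;
        unsigned int num_rx_queues;
};

/* One registered page list per queue in QPL mode, none when buffers
 * are posted by raw DMA address.
 */
static unsigned int num_qpls(const struct gve_cfg_sketch *cfg)
{
        if (cfg->raw_addressing)
                return 0;
        return cfg->num_tx_queues + cfg->num_rx_queues;
}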
- */ - return dev->max_mtu <= PAGE_SIZE / 2; -} - /* buffers */ int gve_alloc_page(struct gve_priv *priv, struct device *dev, struct page **page, dma_addr_t *dma, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 24ae6a28a806..53864f200599 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -14,6 +14,57 @@ #define GVE_ADMINQ_SLEEP_LEN 20 #define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100 +#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \ +"Expected: length=%d, feature_mask=%x.\n" \ +"Actual: length=%d, feature_mask=%x.\n" + +static +struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor, + struct gve_device_option *option) +{ + void *option_end, *descriptor_end; + + option_end = (void *)(option + 1) + be16_to_cpu(option->option_length); + descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length); + + return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end; +} + +static +void gve_parse_device_option(struct gve_priv *priv, + struct gve_device_descriptor *device_descriptor, + struct gve_device_option *option) +{ + u16 option_length = be16_to_cpu(option->option_length); + u16 option_id = be16_to_cpu(option->option_id); + + switch (option_id) { + case GVE_DEV_OPT_ID_RAW_ADDRESSING: + /* If the length or feature mask doesn't match, + * continue without enabling the feature. + */ + if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING || + option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing", + GVE_DEV_OPT_LEN_RAW_ADDRESSING, + cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING), + option_length, option->feat_mask); + priv->raw_addressing = 0; + } else { + dev_info(&priv->pdev->dev, + "Raw addressing device option enabled.\n"); + priv->raw_addressing = 1; + } + break; + default: + /* If we don't recognize the option just continue + * without doing anything. + */ + dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n", + option_id); + } +} + int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) { priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE, @@ -318,8 +369,10 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) { struct gve_tx_ring *tx = &priv->tx[queue_index]; union gve_adminq_command cmd; + u32 qpl_id; int err; + qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE); cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) { @@ -328,7 +381,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) .queue_resources_addr = cpu_to_be64(tx->q_resources_bus), .tx_ring_addr = cpu_to_be64(tx->bus), - .queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id), + .queue_page_list_id = cpu_to_be32(qpl_id), .ntfy_id = cpu_to_be32(tx->ntfy_id), }; @@ -357,8 +410,10 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) { struct gve_rx_ring *rx = &priv->rx[queue_index]; union gve_adminq_command cmd; + u32 qpl_id; int err; + qpl_id = priv->raw_addressing ? 
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id; memset(&cmd, 0, sizeof(cmd)); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) { @@ -369,7 +424,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus), .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus), - .queue_page_list_id = cpu_to_be32(rx->data.qpl->id), + .queue_page_list_id = cpu_to_be32(qpl_id), }; err = gve_adminq_issue_cmd(priv, &cmd); @@ -460,11 +515,14 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_describe_device(struct gve_priv *priv) { struct gve_device_descriptor *descriptor; + struct gve_device_option *dev_opt; union gve_adminq_command cmd; dma_addr_t descriptor_bus; + u16 num_options; int err = 0; u8 *mac; u16 mtu; + int i; memset(&cmd, 0, sizeof(cmd)); descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE, @@ -511,13 +569,30 @@ int gve_adminq_describe_device(struct gve_priv *priv) mac = descriptor->mac; dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac); priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl); - priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl); - if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) { - dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", - priv->rx_pages_per_qpl); - priv->rx_desc_cnt = priv->rx_pages_per_qpl; + priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl); + if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) { + dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n", + priv->rx_data_slot_cnt); + priv->rx_desc_cnt = priv->rx_data_slot_cnt; } priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues); + dev_opt = (void *)(descriptor + 1); + + num_options = be16_to_cpu(descriptor->num_device_options); + for (i = 0; i < num_options; i++) { + struct gve_device_option *next_opt; + + next_opt = gve_get_next_option(descriptor, dev_opt); + if (!next_opt) { + dev_err(&priv->dev->dev, + "options exceed device_descriptor's total length.\n"); + err = -EINVAL; + goto free_device_descriptor; + } + + gve_parse_device_option(priv, descriptor, dev_opt); + dev_opt = next_opt; + } free_device_descriptor: dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor, diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index 015796a20118..d320c2ffd87c 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -79,12 +79,17 @@ struct gve_device_descriptor { static_assert(sizeof(struct gve_device_descriptor) == 40); -struct device_option { - __be32 option_id; - __be32 option_length; +struct gve_device_option { + __be16 option_id; + __be16 option_length; + __be32 feat_mask; }; -static_assert(sizeof(struct device_option) == 8); +static_assert(sizeof(struct gve_device_option) == 8); + +#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1 +#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0 +#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0 struct gve_adminq_configure_device_resources { __be64 counter_array; @@ -111,6 +116,8 @@ struct gve_adminq_unregister_page_list { static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4); +#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF + struct gve_adminq_create_tx_queue 
{ __be32 queue_id; __be32 reserved; diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h index 54779871d52e..05ae6300e984 100644 --- a/drivers/net/ethernet/google/gve/gve_desc.h +++ b/drivers/net/ethernet/google/gve/gve_desc.h @@ -16,9 +16,11 @@ * Base addresses encoded in seg_addr are not assumed to be physical * addresses. The ring format assumes these come from some linear address * space. This could be physical memory, kernel virtual memory, user virtual - * memory. gVNIC uses lists of registered pages. Each queue is assumed - * to be associated with a single such linear address space to ensure a - * consistent meaning for seg_addrs posted to its rings. + * memory. + * If raw dma addressing is not supported then gVNIC uses lists of registered + * pages. Each queue is assumed to be associated with a single such linear + * address space to ensure a consistent meaning for seg_addrs posted to its + * rings. */ struct gve_tx_pkt_desc { @@ -72,12 +74,15 @@ struct gve_rx_desc { } __packed; static_assert(sizeof(struct gve_rx_desc) == 64); -/* As with the Tx ring format, the qpl_offset entries below are offsets into an - * ordered list of registered pages. +/* If the device supports raw dma addressing then the addr in data slot is + * the dma address of the buffer. + * If the device only supports registered segments then the addr is a byte + * offset into the registered segment (an ordered list of pages) where the + * buffer is. */ -struct gve_rx_data_slot { - /* byte offset into the rx registered segment of this slot */ +union gve_rx_data_slot { __be64 qpl_offset; + __be64 addr; }; /* GVE Receive Packet Descriptor Seq No */ diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 7b44769bd87c..0901fa6853ca 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -4,6 +4,7 @@ * Copyright (C) 2015-2019 Google, Inc.
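gve_adminq_describe_device() now iterates a variable-length option list appended to the descriptor, refusing to step past the buffer end before parsing each entry. A self-contained sketch of that bounds-checked TLV walk; byte-order conversions are dropped and the header layout (16-bit id and length) follows gve_device_option:

#include <stddef.h>
#include <stdint.h>

struct opt_hdr {
        uint16_t option_id;
        uint16_t option_length;         /* payload bytes after the header */
};

/* Returns the option following 'opt', or NULL when the current option's
 * payload would run past the descriptor - mirroring gve_get_next_option().
 */
static const struct opt_hdr *next_option(const void *desc, size_t total_len,
                                         const struct opt_hdr *opt)
{
        const char *end = (const char *)desc + total_len;
        const char *next = (const char *)(opt + 1) + opt->option_length;

        return next > end ? NULL : (const struct opt_hdr *)next;
}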
*/ +#include <linux/ethtool.h> #include <linux/rtnetlink.h> #include "gve.h" #include "gve_adminq.h" @@ -50,6 +51,7 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", + "tx_dma_mapping_error[%u]", }; static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { @@ -322,6 +324,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = tx->stop_queue; data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv, tx)); + data[i++] = tx->dma_mapping_error; /* stats from NIC */ if (skip_nic_stats) { /* skip NIC tx stats */ diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 02e7d74779f4..7302498c6df3 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -677,6 +677,10 @@ static int gve_alloc_qpls(struct gve_priv *priv) int i, j; int err; + /* Raw addressing means no QPLs */ + if (priv->raw_addressing) + return 0; + priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; @@ -689,7 +693,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) } for (; i < num_qpls; i++) { err = gve_alloc_queue_page_list(priv, i, - priv->rx_pages_per_qpl); + priv->rx_data_slot_cnt); if (err) goto free_qpls; } @@ -717,6 +721,10 @@ static void gve_free_qpls(struct gve_priv *priv) int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); int i; + /* Raw addressing means no QPLs */ + if (priv->raw_addressing) + return; + kvfree(priv->qpl_cfg.qpl_id_map); for (i = 0; i < num_qpls; i++) @@ -1077,6 +1085,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) if (skip_describe_device) goto setup_device; + priv->raw_addressing = false; /* Get the initial information we need from the device */ err = gve_adminq_describe_device(priv); if (err) { diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 008fa897a3e6..bf123fe524c4 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -16,12 +16,39 @@ static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) block->rx = NULL; } +static void gve_rx_free_buffer(struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) & + GVE_DATA_SLOT_ADDR_PAGE_MASK); + + gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); +} + +static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + if (rx->data.raw_addressing) { + u32 slots = rx->mask + 1; + int i; + + for (i = 0; i < slots; i++) + gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + } else { + gve_unassign_qpl(priv, rx->data.qpl->id); + rx->data.qpl = NULL; + } + kvfree(rx->data.page_info); + rx->data.page_info = NULL; +} + static void gve_rx_free_ring(struct gve_priv *priv, int idx) { struct gve_rx_ring *rx = &priv->rx[idx]; struct device *dev = &priv->pdev->dev; + u32 slots = rx->mask + 1; size_t bytes; - u32 slots; gve_rx_remove_from_block(priv, idx); @@ -33,11 +60,8 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL; - gve_unassign_qpl(priv, rx->data.qpl->id); - rx->data.qpl = NULL; - 
kvfree(rx->data.page_info); + gve_rx_unfill_pages(priv, rx); - slots = rx->mask + 1; bytes = sizeof(*rx->data.data_ring) * slots; dma_free_coherent(dev, bytes, rx->data.data_ring, rx->data.data_bus); @@ -46,19 +70,35 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) } static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, - struct gve_rx_data_slot *slot, - dma_addr_t addr, struct page *page) + dma_addr_t addr, struct page *page, __be64 *slot_addr) { page_info->page = page; page_info->page_offset = 0; page_info->page_address = page_address(page); - slot->qpl_offset = cpu_to_be64(addr); + *slot_addr = cpu_to_be64(addr); +} + +static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, + struct gve_rx_slot_page_info *page_info, + union gve_rx_data_slot *data_slot) +{ + struct page *page; + dma_addr_t dma; + int err; + + err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE); + if (err) + return err; + + gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); + return 0; } static int gve_prefill_rx_pages(struct gve_rx_ring *rx) { struct gve_priv *priv = rx->gve; u32 slots; + int err; int i; /* Allocate one page per Rx queue slot. Each page is split into two @@ -71,17 +111,30 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) if (!rx->data.page_info) return -ENOMEM; - rx->data.qpl = gve_assign_rx_qpl(priv); - + if (!rx->data.raw_addressing) + rx->data.qpl = gve_assign_rx_qpl(priv); for (i = 0; i < slots; i++) { - struct page *page = rx->data.qpl->pages[i]; - dma_addr_t addr = i * PAGE_SIZE; + if (!rx->data.raw_addressing) { + struct page *page = rx->data.qpl->pages[i]; + dma_addr_t addr = i * PAGE_SIZE; - gve_setup_rx_buffer(&rx->data.page_info[i], - &rx->data.data_ring[i], addr, page); + gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, + &rx->data.data_ring[i].qpl_offset); + continue; + } + err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i], + &rx->data.data_ring[i]); + if (err) + goto alloc_err; } return slots; +alloc_err: + while (i--) + gve_rx_free_buffer(&priv->pdev->dev, + &rx->data.page_info[i], + &rx->data.data_ring[i]); + return err; } static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx) @@ -110,8 +163,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) rx->gve = priv; rx->q_num = idx; - slots = priv->rx_pages_per_qpl; + slots = priv->rx_data_slot_cnt; rx->mask = slots - 1; + rx->data.raw_addressing = priv->raw_addressing; /* alloc rx data ring */ bytes = sizeof(*rx->data.data_ring) * slots; @@ -156,8 +210,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) err = -ENOMEM; goto abort_with_q_resources; } - rx->mask = slots - 1; rx->cnt = 0; + rx->db_threshold = priv->rx_desc_cnt / 2; rx->desc.seqno = 1; gve_rx_add_to_block(priv, idx); @@ -168,7 +222,7 @@ abort_with_q_resources: rx->q_resources, rx->q_resources_bus); rx->q_resources = NULL; abort_filled: - kvfree(rx->data.page_info); + gve_rx_unfill_pages(priv, rx); abort_with_slots: bytes = sizeof(*rx->data.data_ring) * slots; dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus); @@ -225,15 +279,14 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) return PKT_HASH_TYPE_L2; } -static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx, - struct net_device *dev, +static struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len) { struct sk_buff *skb = napi_alloc_skb(napi, len); void *va = 
page_info->page_address + GVE_RX_PAD + - page_info->page_offset; + (page_info->page_offset ? PAGE_SIZE / 2 : 0); if (unlikely(!skb)) return NULL; @@ -244,15 +297,10 @@ static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx, skb->protocol = eth_type_trans(skb, dev); - u64_stats_update_begin(&rx->statss); - rx->rx_copied_pkt++; - u64_stats_update_end(&rx->statss); - return skb; } -static struct sk_buff *gve_rx_add_frags(struct net_device *dev, - struct napi_struct *napi, +static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, u16 len) { @@ -262,20 +310,92 @@ static struct sk_buff *gve_rx_add_frags(struct net_device *dev, return NULL; skb_add_rx_frag(skb, 0, page_info->page, - page_info->page_offset + + (page_info->page_offset ? PAGE_SIZE / 2 : 0) + GVE_RX_PAD, len, PAGE_SIZE / 2); return skb; } -static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, - struct gve_rx_data_slot *data_ring) +static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr) +{ + const __be64 offset = cpu_to_be64(PAGE_SIZE / 2); + + /* "flip" to other packet buffer on this page */ + page_info->page_offset ^= 0x1; + *(slot_addr) ^= offset; +} + +static bool gve_rx_can_flip_buffers(struct net_device *netdev) +{ + return PAGE_SIZE == 4096 + ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false; +} + +static int gve_rx_can_recycle_buffer(struct page *page) +{ + int pagecount = page_count(page); + + /* This page is not being used by any SKBs - reuse */ + if (pagecount == 1) + return 1; + /* This page is still being used by an SKB - we can't reuse */ + else if (pagecount >= 2) + return 0; + WARN(pagecount < 1, "Pagecount should never be < 1"); + return -1; +} + +static struct sk_buff * +gve_rx_raw_addressing(struct device *dev, struct net_device *netdev, + struct gve_rx_slot_page_info *page_info, u16 len, + struct napi_struct *napi, + union gve_rx_data_slot *data_slot) { - u64 addr = be64_to_cpu(data_ring->qpl_offset); + struct sk_buff *skb; + + skb = gve_rx_add_frags(napi, page_info, len); + if (!skb) + return NULL; - page_info->page_offset ^= PAGE_SIZE / 2; - addr ^= PAGE_SIZE / 2; - data_ring->qpl_offset = cpu_to_be64(addr); + /* Optimistically stop the kernel from freeing the page by increasing + * the page bias. We will check the refcount in refill to determine if + * we need to alloc a new page. + */ + get_page(page_info->page); + + return skb; +} + +static struct sk_buff * +gve_rx_qpl(struct device *dev, struct net_device *netdev, + struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info, + u16 len, struct napi_struct *napi, + union gve_rx_data_slot *data_slot) +{ + struct sk_buff *skb; + + /* if raw_addressing mode is not enabled gvnic can only receive into + * registered segments. If the buffer can't be recycled, our only + * choice is to copy the data out of it so that we can return it to the + * device. + */ + if (page_info->can_flip) { + skb = gve_rx_add_frags(napi, page_info, len); + /* No point in recycling if we didn't get the skb */ + if (skb) { + /* Make sure that the page isn't freed. 
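gve_rx_flip_buff() bounces the NIC between the two halves of each page: the driver's page_offset flag flips 0<->1 and the posted address moves by half a page, both with an XOR (which also commutes with the __be64 byte swap in the real slot). Masking the low bits of the slot with GVE_DATA_SLOT_ADDR_PAGE_MASK undoes the offset and recovers the mapped page address. A sketch with plain integers:

#include <stdint.h>

#define HALF_PAGE 2048u                 /* PAGE_SIZE / 2 for 4 KiB pages */
#define PAGE_MASK_SKETCH (~((uint64_t)2 * HALF_PAGE - 1))

struct rx_slot_sketch {
        uint8_t page_offset;            /* 0 = first half, 1 = second half */
        uint64_t slot_addr;             /* address the NIC DMAs into */
};

/* Flip to the other packet buffer on the same page. */
static void flip_buffer(struct rx_slot_sketch *s)
{
        s->page_offset ^= 1;
        s->slot_addr ^= HALF_PAGE;
}

/* Recover the page-aligned DMA address originally mapped for the slot. */
static uint64_t slot_to_page_dma(const struct rx_slot_sketch *s)
{
        return s->slot_addr & PAGE_MASK_SKETCH;
}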
*/ + get_page(page_info->page); + gve_rx_flip_buff(page_info, &data_slot->qpl_offset); + } + } else { + skb = gve_rx_copy(netdev, napi, page_info, len); + if (skb) { + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + u64_stats_update_end(&rx->statss); + } + } + return skb; } static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, @@ -285,8 +405,9 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, struct gve_priv *priv = rx->gve; struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi; struct net_device *dev = priv->dev; - struct sk_buff *skb; - int pagecount; + union gve_rx_data_slot *data_slot; + struct sk_buff *skb = NULL; + dma_addr_t page_bus; u16 len; /* drop this packet */ @@ -294,71 +415,55 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc, u64_stats_update_begin(&rx->statss); rx->rx_desc_err_dropped_pkt++; u64_stats_update_end(&rx->statss); - return true; + return false; } len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD; page_info = &rx->data.page_info[idx]; - dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx], - PAGE_SIZE, DMA_FROM_DEVICE); - /* gvnic can only receive into registered segments. If the buffer - * can't be recycled, our only choice is to copy the data out of - * it so that we can return it to the device. - */ + data_slot = &rx->data.data_ring[idx]; + page_bus = (rx->data.raw_addressing) ? + be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK : + rx->data.qpl->page_buses[idx]; + dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, + PAGE_SIZE, DMA_FROM_DEVICE); - if (PAGE_SIZE == 4096) { - if (len <= priv->rx_copybreak) { - /* Just copy small packets */ - skb = gve_rx_copy(rx, dev, napi, page_info, len); - u64_stats_update_begin(&rx->statss); - rx->rx_copybreak_pkt++; - u64_stats_update_end(&rx->statss); - goto have_skb; - } - if (unlikely(!gve_can_recycle_pages(dev))) { - skb = gve_rx_copy(rx, dev, napi, page_info, len); - goto have_skb; - } - pagecount = page_count(page_info->page); - if (pagecount == 1) { - /* No part of this page is used by any SKBs; we attach - * the page fragment to a new SKB and pass it up the - * stack. - */ - skb = gve_rx_add_frags(dev, napi, page_info, len); - if (!skb) { - u64_stats_update_begin(&rx->statss); - rx->rx_skb_alloc_fail++; - u64_stats_update_end(&rx->statss); - return true; + if (len <= priv->rx_copybreak) { + /* Just copy small packets */ + skb = gve_rx_copy(dev, napi, page_info, len); + u64_stats_update_begin(&rx->statss); + rx->rx_copied_pkt++; + rx->rx_copybreak_pkt++; + u64_stats_update_end(&rx->statss); + } else { + u8 can_flip = gve_rx_can_flip_buffers(dev); + int recycle = 0; + + if (can_flip) { + recycle = gve_rx_can_recycle_buffer(page_info->page); + if (recycle < 0) { + if (!rx->data.raw_addressing) + gve_schedule_reset(priv); + return false; } - /* Make sure the kernel stack can't release the page */ - get_page(page_info->page); - /* "flip" to other packet buffer on this page */ - gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]); - } else if (pagecount >= 2) { - /* We have previously passed the other half of this - * page up the stack, but it has not yet been freed. 
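A hedged aside on the half-page scheme above: each 4K page carries two receive buffers, gve_rx_flip_buff toggles between them with an XOR on both the page_offset flag and the descriptor address, and gve_rx_can_recycle_buffer keys reuse off the page refcount. A standalone userspace model (invented names; the driver's __be64 byte-order handling is elided for clarity):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct slot { uint64_t addr; };    /* models the rx data slot address */
struct page_info { int offset; };  /* 0 = first half, 1 = second half */

static void flip_buff(struct page_info *pi, struct slot *s)
{
	pi->offset ^= 1;           /* "flip" to the other packet buffer */
	s->addr ^= PAGE_SIZE / 2;  /* same XOR applied to the DMA slot  */
}

static int can_recycle(int page_refs)
{
	/* refcount 1: only the driver holds the page -> safe to reuse */
	return page_refs == 1;
}

int main(void)
{
	struct slot s = { .addr = 0x10000 };
	struct page_info pi = { .offset = 0 };

	flip_buff(&pi, &s);
	assert(pi.offset == 1 && s.addr == 0x10000 + PAGE_SIZE / 2);
	flip_buff(&pi, &s);      /* a second flip restores the original */
	assert(pi.offset == 0 && s.addr == 0x10000);
	printf("recycle with 1 ref: %d, with 2 refs: %d\n",
	       can_recycle(1), can_recycle(2));
	return 0;
}

The refcount test works because the driver always holds one reference itself; any extra reference means an skb still owns the other half of the page.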
- */ - skb = gve_rx_copy(rx, dev, napi, page_info, len); + } + + page_info->can_flip = can_flip && recycle; + if (rx->data.raw_addressing) { + skb = gve_rx_raw_addressing(&priv->pdev->dev, dev, + page_info, len, napi, + data_slot); } else { - WARN(pagecount < 1, "Pagecount should never be < 1"); - return false; + skb = gve_rx_qpl(&priv->pdev->dev, dev, rx, + page_info, len, napi, data_slot); } - } else { - skb = gve_rx_copy(rx, dev, napi, page_info, len); } -have_skb: - /* We didn't manage to allocate an skb but we haven't had any - * reset worthy failures. - */ if (!skb) { u64_stats_update_begin(&rx->statss); rx->rx_skb_alloc_fail++; u64_stats_update_end(&rx->statss); - return true; + return false; } if (likely(feat & NETIF_F_RXCSUM)) { @@ -399,19 +504,73 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx) return (GVE_SEQNO(flags_seq) == rx->desc.seqno); } +static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx) +{ + int refill_target = rx->mask + 1; + u32 fill_cnt = rx->fill_cnt; + + while (fill_cnt - rx->cnt < refill_target) { + struct gve_rx_slot_page_info *page_info; + u32 idx = fill_cnt & rx->mask; + + page_info = &rx->data.page_info[idx]; + if (page_info->can_flip) { + /* The other half of the page is free because it was + * free when we processed the descriptor. Flip to it. + */ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + + gve_rx_flip_buff(page_info, &data_slot->addr); + page_info->can_flip = 0; + } else { + /* It is possible that the networking stack has already + * finished processing all outstanding packets in the buffer + * and it can be reused. + * Flipping is unnecessary here - if the networking stack still + * owns half the page it is impossible to tell which half. Either + * the whole page is free or it needs to be replaced. 
+ */ + int recycle = gve_rx_can_recycle_buffer(page_info->page); + + if (recycle < 0) { + if (!rx->data.raw_addressing) + gve_schedule_reset(priv); + return false; + } + if (!recycle) { + /* We can't reuse the buffer - alloc a new one*/ + union gve_rx_data_slot *data_slot = + &rx->data.data_ring[idx]; + struct device *dev = &priv->pdev->dev; + + gve_rx_free_buffer(dev, page_info, data_slot); + page_info->page = NULL; + if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot)) + break; + } + } + fill_cnt++; + } + rx->fill_cnt = fill_cnt; + return true; +} + bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, netdev_features_t feat) { struct gve_priv *priv = rx->gve; + u32 work_done = 0, packets = 0; struct gve_rx_desc *desc; u32 cnt = rx->cnt; u32 idx = cnt & rx->mask; - u32 work_done = 0; u64 bytes = 0; desc = rx->desc.desc_ring + idx; while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) && work_done < budget) { + bool dropped; + netif_info(priv, rx_status, priv->dev, "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n", rx->q_num, idx, desc, desc->flags_seq); @@ -419,9 +578,11 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, "[%d] seqno=%d rx->desc.seqno=%d\n", rx->q_num, GVE_SEQNO(desc->flags_seq), rx->desc.seqno); - bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; - if (!gve_rx(rx, desc, feat, idx)) - gve_schedule_reset(priv); + dropped = !gve_rx(rx, desc, feat, idx); + if (!dropped) { + bytes += be16_to_cpu(desc->len) - GVE_RX_PAD; + packets++; + } cnt++; idx = cnt & rx->mask; desc = rx->desc.desc_ring + idx; @@ -429,15 +590,34 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget, work_done++; } - if (!work_done) + if (!work_done && rx->fill_cnt - cnt > rx->db_threshold) return false; u64_stats_update_begin(&rx->statss); - rx->rpackets += work_done; + rx->rpackets += packets; rx->rbytes += bytes; u64_stats_update_end(&rx->statss); rx->cnt = cnt; - rx->fill_cnt += work_done; + + /* restock ring slots */ + if (!rx->data.raw_addressing) { + /* In QPL mode buffs are refilled as the desc are processed */ + rx->fill_cnt += work_done; + } else if (rx->fill_cnt - cnt <= rx->db_threshold) { + /* In raw addressing mode buffs are only refilled if the avail + * falls below a threshold. + */ + if (!gve_rx_refill_buffers(priv, rx)) + return false; + + /* If we were not able to completely refill buffers, we'll want + * to schedule this queue for work again to refill buffers. 
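The refill bookkeeping above relies on free-running u32 producer/consumer counters (fill_cnt, cnt) masked down into a power-of-two ring. A small standalone model of that arithmetic (the counts and threshold are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 1024 - 1;       /* slot count is a power of two   */
	uint32_t cnt = 0xfffffffe;      /* consumer count, about to wrap  */
	uint32_t fill_cnt = cnt + 600;  /* producer count, already wrapped */
	uint32_t db_threshold = 512;    /* e.g. half the descriptor ring  */

	/* Unsigned subtraction stays correct across the 2^32 wrap, so the
	 * occupancy test needs no special casing. */
	printf("outstanding buffers: %u\n", fill_cnt - cnt);
	printf("need refill: %s\n",
	       (fill_cnt - cnt <= db_threshold) ? "yes" : "no");
	printf("next slot index: %u\n", fill_cnt & mask);
	return 0;
}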
+ */ + if (rx->fill_cnt - cnt <= rx->db_threshold) { + gve_rx_write_doorbell(priv, rx); + return true; + } + } gve_rx_write_doorbell(priv, rx); return gve_rx_work_pending(rx); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index d0244feb0301..6938f3a939d6 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -158,9 +158,11 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) tx->q_resources, tx->q_resources_bus); tx->q_resources = NULL; - gve_tx_fifo_release(priv, &tx->tx_fifo); - gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); - tx->tx_fifo.qpl = NULL; + if (!tx->raw_addressing) { + gve_tx_fifo_release(priv, &tx->tx_fifo); + gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); + tx->tx_fifo.qpl = NULL; + } bytes = sizeof(*tx->desc) * slots; dma_free_coherent(hdev, bytes, tx->desc, tx->bus); @@ -206,11 +208,15 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) if (!tx->desc) goto abort_with_info; - tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); + tx->raw_addressing = priv->raw_addressing; + tx->dev = &priv->pdev->dev; + if (!tx->raw_addressing) { + tx->tx_fifo.qpl = gve_assign_tx_qpl(priv); - /* map Tx FIFO */ - if (gve_tx_fifo_init(priv, &tx->tx_fifo)) - goto abort_with_desc; + /* map Tx FIFO */ + if (gve_tx_fifo_init(priv, &tx->tx_fifo)) + goto abort_with_desc; + } tx->q_resources = dma_alloc_coherent(hdev, @@ -228,7 +234,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) return 0; abort_with_fifo: - gve_tx_fifo_release(priv, &tx->tx_fifo); + if (!tx->raw_addressing) + gve_tx_fifo_release(priv, &tx->tx_fifo); abort_with_desc: dma_free_coherent(hdev, bytes, tx->desc, tx->bus); tx->desc = NULL; @@ -301,27 +308,47 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, return bytes; } -/* The most descriptors we could need are 3 - 1 for the headers, 1 for - * the beginning of the payload at the end of the FIFO, and 1 if the - * payload wraps to the beginning of the FIFO. +/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag, + * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor, + * and +1 if the payload wraps to the beginning of the FIFO. */ -#define MAX_TX_DESC_NEEDED 3 +#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3) +static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) +{ + if (info->skb) { + dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma), + dma_unmap_len(&info->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&info->buf, len, 0); + } else { + dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma), + dma_unmap_len(&info->buf, len), + DMA_TO_DEVICE); + dma_unmap_len_set(&info->buf, len, 0); + } +} /* Check if sufficient resources (descriptor ring space, FIFO space) are * available to transmit the given number of bytes. */ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required) { - return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && - gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required)); + bool can_alloc = true; + + if (!tx->raw_addressing) + can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required); + + return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc); } /* Stops the queue if the skb cannot be transmitted. 
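A sketch of the admission check described above, assuming MAX_SKB_FRAGS is 17 (typical for 4K-page builds): raw-addressing queues only need descriptor slots, while QPL queues must also reserve FIFO bytes.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17
#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)

/* Model of the check: descriptor slots always required, FIFO bytes
 * only when the queue is not in raw-addressing mode. */
static bool can_tx(bool raw_addressing, int desc_avail,
		   int fifo_avail, int bytes_required)
{
	bool can_alloc = true;

	if (!raw_addressing)
		can_alloc = fifo_avail >= bytes_required;
	return desc_avail >= MAX_TX_DESC_NEEDED && can_alloc;
}

int main(void)
{
	printf("%d\n", can_tx(true, 32, 0, 0));      /* 1: slots suffice  */
	printf("%d\n", can_tx(false, 32, 100, 200)); /* 0: FIFO too small */
	return 0;
}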
*/ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb) { - int bytes_required; + int bytes_required = 0; + + if (!tx->raw_addressing) + bytes_required = gve_skb_fifo_bytes_required(tx, skb); - bytes_required = gve_skb_fifo_bytes_required(tx, skb); if (likely(gve_can_tx(tx, bytes_required))) return 0; @@ -395,17 +422,13 @@ static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses, { u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE; u64 first_page = iov_offset / PAGE_SIZE; - dma_addr_t dma; u64 page; - for (page = first_page; page <= last_page; page++) { - dma = page_buses[page]; - dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE); - } + for (page = first_page; page <= last_page; page++) + dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE); } -static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, - struct device *dev) +static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb) { int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset; union gve_tx_desc *pkt_desc, *seg_desc; @@ -447,7 +470,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, skb_copy_bits(skb, 0, tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset, hlen); - gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, info->iov[hdr_nfrags - 1].iov_offset, info->iov[hdr_nfrags - 1].iov_len); copy_offset = hlen; @@ -463,7 +486,7 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, skb_copy_bits(skb, copy_offset, tx->tx_fifo.base + info->iov[i].iov_offset, info->iov[i].iov_len); - gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses, + gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses, info->iov[i].iov_offset, info->iov[i].iov_len); copy_offset += info->iov[i].iov_len; @@ -472,6 +495,94 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb, return 1 + payload_nfrags; } +static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, + struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + int hlen, payload_nfrags, l4_hdr_offset; + union gve_tx_desc *pkt_desc, *seg_desc; + struct gve_tx_buffer_state *info; + bool is_gso = skb_is_gso(skb); + u32 idx = tx->req & tx->mask; + struct gve_tx_dma_buf *buf; + u64 addr; + u32 len; + int i; + + info = &tx->info[idx]; + pkt_desc = &tx->desc[idx]; + + l4_hdr_offset = skb_checksum_start_offset(skb); + /* If the skb is gso, then we want only up to the tcp header in the first segment + * to efficiently replicate on each segment otherwise we want the linear portion + * of the skb (which will contain the checksum because skb->csum_start and + * skb->csum_offset are given relative to skb->head) in the first segment. + */ + hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb); + len = skb_headlen(skb); + + info->skb = skb; + + addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto drop; + } + buf = &info->buf; + dma_unmap_len_set(buf, len, len); + dma_unmap_addr_set(buf, dma, addr); + + payload_nfrags = shinfo->nr_frags; + if (hlen < len) { + /* For gso the rest of the linear portion of the skb needs to + * be in its own descriptor. 
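gve_dma_sync_for_device above computes an inclusive page range from a byte span; a toy version of just that arithmetic (the offsets are arbitrary):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint64_t iov_offset = 4000, iov_len = 200; /* straddles a boundary */
	uint64_t first_page = iov_offset / PAGE_SIZE;
	uint64_t last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;

	/* The -1 makes last_page inclusive: a span ending exactly on a
	 * page boundary must not sync the page after it. */
	for (uint64_t page = first_page; page <= last_page; page++)
		printf("sync page %llu\n", (unsigned long long)page);
	return 0;
}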
+ */ + payload_nfrags++; + gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + 1 + payload_nfrags, hlen, addr); + + len -= hlen; + addr += hlen; + idx = (tx->req + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + } else { + gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset, + 1 + payload_nfrags, hlen, addr); + } + + for (i = 0; i < shinfo->nr_frags; i++) { + const skb_frag_t *frag = &shinfo->frags[i]; + + idx = (idx + 1) & tx->mask; + seg_desc = &tx->desc[idx]; + len = skb_frag_size(frag); + addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx->dev, addr))) { + tx->dma_mapping_error++; + goto unmap_drop; + } + buf = &tx->info[idx].buf; + tx->info[idx].skb = NULL; + dma_unmap_len_set(buf, len, len); + dma_unmap_addr_set(buf, dma, addr); + + gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); + } + + return 1 + payload_nfrags; + +unmap_drop: + i += (payload_nfrags == shinfo->nr_frags ? 1 : 2); + while (i--) { + idx--; + gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]); + } +drop: + tx->dropped_pkt++; + return 0; +} + netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) { struct gve_priv *priv = netdev_priv(dev); @@ -490,17 +601,26 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev) gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_BUSY; } - nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev); - - netdev_tx_sent_queue(tx->netdev_txq, skb->len); - skb_tx_timestamp(skb); - - /* give packets to NIC */ - tx->req += nsegs; + if (tx->raw_addressing) + nsegs = gve_tx_add_skb_no_copy(priv, tx, skb); + else + nsegs = gve_tx_add_skb_copy(priv, tx, skb); + + /* If the packet is getting sent, we need to update the skb */ + if (nsegs) { + netdev_tx_sent_queue(tx->netdev_txq, skb->len); + skb_tx_timestamp(skb); + tx->req += nsegs; + } else { + dev_kfree_skb_any(skb); + } if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more()) return NETDEV_TX_OK; + /* Give packets to NIC. Even if this packet failed to send the doorbell + * might need to be rung because of xmit_more. 
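A minimal model of the doorbell policy this comment describes, assuming the usual xmit_more contract (a hint that more packets may follow, never a guarantee):

#include <stdbool.h>
#include <stdio.h>

/* Skip the expensive MMIO doorbell write while the stack promises more
 * packets, but never skip it when the queue just stopped, or the
 * already-queued descriptors would be stranded. */
static bool should_ring_doorbell(bool queue_stopped, bool xmit_more)
{
	return queue_stopped || !xmit_more;
}

int main(void)
{
	printf("%d %d %d\n",
	       should_ring_doorbell(false, true),  /* 0: batch it        */
	       should_ring_doorbell(false, false), /* 1: last of a batch */
	       should_ring_doorbell(true, true));  /* 1: queue stopped   */
	return 0;
}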
+ */ gve_tx_put_doorbell(priv, tx->q_resources, tx->req); return NETDEV_TX_OK; } @@ -525,24 +645,29 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, info = &tx->info[idx]; skb = info->skb; + /* Unmap the buffer */ + if (tx->raw_addressing) + gve_tx_unmap_buf(tx->dev, info); + tx->done++; /* Mark as free */ if (skb) { info->skb = NULL; bytes += skb->len; pkts++; dev_consume_skb_any(skb); + if (tx->raw_addressing) + continue; /* FIFO free */ for (i = 0; i < ARRAY_SIZE(info->iov); i++) { - space_freed += info->iov[i].iov_len + - info->iov[i].iov_padding; + space_freed += info->iov[i].iov_len + info->iov[i].iov_padding; info->iov[i].iov_len = 0; info->iov[i].iov_padding = 0; } } - tx->done++; } - gve_tx_free_fifo(&tx->tx_fifo, space_freed); + if (!tx->raw_addressing) + gve_tx_free_fifo(&tx->tx_fifo, space_freed); u64_stats_update_begin(&tx->statss); tx->bytes_done += bytes; tx->pkt_done += pkts; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a9aca8c24e90..173d6966c1a3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -546,9 +546,9 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, @@ -593,9 +593,9 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt) obj_args.integer.type = ACPI_TYPE_INTEGER; obj_args.integer.value = mac_cb->mac_id; - argv4.type = ACPI_TYPE_PACKAGE, - argv4.package.count = 1, - argv4.package.elements = &obj_args, + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 1; + argv4.package.elements = &obj_args; obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), &hns_dsaf_acpi_dsm_guid, 0, diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 1ffe8fac702d..fb5e8842983c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -110,6 +110,7 @@ struct hclge_vf_to_pf_msg { u8 en_bc; u8 en_uc; u8 en_mc; + u8 en_limit_promisc; }; struct { u8 vector_id; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 912c51e327d6..a7daf6d4511e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -25,10 +25,13 @@ #include <linux/dcbnl.h> #include <linux/delay.h> #include <linux/device.h> +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> +#include <linux/pkt_sched.h> #include <linux/types.h> +#include <net/pkt_cls.h> #define HNAE3_MOD_VERSION "1.0" @@ -80,12 +83,13 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_PTP_B, HNAE3_DEV_SUPPORT_INT_QL_B, - HNAE3_DEV_SUPPORT_SIMPLE_BD_B, + HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_TX_PUSH_B, HNAE3_DEV_SUPPORT_PHY_IMP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_HW_PAD_B, HNAE3_DEV_SUPPORT_STASH_B, + HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -112,8 +116,8 @@ enum HNAE3_DEV_CAP_BITS { #define 
hnae3_dev_int_ql_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps) -#define hnae3_dev_simple_bd_supported(hdev) \ - test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps) +#define hnae3_dev_hw_csum_supported(hdev) \ + test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, (hdev)->ae_dev->caps) #define hnae3_dev_tx_push_supported(hdev) \ test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps) @@ -278,6 +282,7 @@ struct hnae3_dev_specs { u16 rss_ind_tbl_size; u16 rss_key_size; u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */ + u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ }; @@ -454,6 +459,12 @@ struct hnae3_ae_dev { * Configure the default MAC for specified VF * get_module_eeprom * Get the optical module eeprom info. + * add_cls_flower + * Add clsflower rule + * del_cls_flower + * Delete clsflower rule + * cls_flower_active + * Check if any cls flower rule exist */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -631,6 +642,11 @@ struct hnae3_ae_ops { int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset, u32 len, u8 *data); bool (*get_cmdq_stat)(struct hnae3_handle *handle); + int (*add_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, int tc); + int (*del_cls_flower)(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower); + bool (*cls_flower_active)(struct hnae3_handle *handle); }; struct hnae3_dcb_ops { @@ -644,7 +660,8 @@ struct hnae3_dcb_ops { u8 (*getdcbx)(struct hnae3_handle *); u8 (*setdcbx)(struct hnae3_handle *, u8); - int (*setup_tc)(struct hnae3_handle *, u8, u8 *); + int (*setup_tc)(struct hnae3_handle *handle, + struct tc_mqprio_qopt_offload *mqprio_qopt); }; struct hnae3_ae_algo { @@ -656,15 +673,17 @@ struct hnae3_ae_algo { #define HNAE3_INT_NAME_LEN 32 #define HNAE3_ITR_COUNTDOWN_START 100 +#define HNAE3_MAX_TC 8 +#define HNAE3_MAX_USER_PRIO 8 struct hnae3_tc_info { - u16 tqp_offset; /* TQP offset from base TQP */ - u16 tqp_count; /* Total TQPs */ - u8 tc; /* TC index */ - bool enable; /* If this TC is enable or not */ + u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ + u16 tqp_count[HNAE3_MAX_TC]; + u16 tqp_offset[HNAE3_MAX_TC]; + unsigned long tc_en; /* bitmap of TC enabled */ + u8 num_tc; /* Total number of enabled TCs */ + bool mqprio_active; }; -#define HNAE3_MAX_TC 8 -#define HNAE3_MAX_USER_PRIO 8 struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ @@ -673,9 +692,7 @@ struct hnae3_knic_private_info { u16 num_tx_desc; u16 num_rx_desc; - u8 num_tc; /* Total number of enabled TCs */ - u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ - struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + struct hnae3_tc_info tc_info; u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ @@ -688,6 +705,7 @@ struct hnae3_knic_private_info { struct hnae3_roce_private_info { struct net_device *netdev; void __iomem *roce_io_base; + void __iomem *roce_mem_base; int base_vector; int num_vectors; @@ -715,6 +733,11 @@ struct hnae3_roce_private_info { #define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE) #define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE) +enum hnae3_pflag { + HNAE3_PFLAG_LIMIT_PROMISC, + HNAE3_PFLAG_MAX +}; + struct hnae3_handle { struct hnae3_client *client; 
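The reworked hnae3_tc_info above replaces a per-TC struct array with parallel arrays indexed by hardware TC. A standalone sketch of the priority-to-queue lookup this layout enables (example numbers invented):

#include <stdint.h>
#include <stdio.h>

#define HNAE3_MAX_TC 8
#define HNAE3_MAX_USER_PRIO 8

struct tc_info {
	uint8_t prio_tc[HNAE3_MAX_USER_PRIO]; /* user priority -> TC */
	uint16_t tqp_count[HNAE3_MAX_TC];
	uint16_t tqp_offset[HNAE3_MAX_TC];
	uint8_t num_tc;
};

int main(void)
{
	/* 2 TCs with 4 queues each; priorities 0-3 -> TC0, 4-7 -> TC1 */
	struct tc_info ti = {
		.prio_tc = { 0, 0, 0, 0, 1, 1, 1, 1 },
		.tqp_count = { 4, 4 },
		.tqp_offset = { 0, 4 },
		.num_tc = 2,
	};
	int prio = 5;
	int tc = ti.prio_tc[prio];

	printf("prio %d -> TC %d -> queues [%u, %u)\n", prio, tc,
	       ti.tqp_offset[tc], ti.tqp_offset[tc] + ti.tqp_count[tc]);
	return 0;
}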
struct pci_dev *pdev; @@ -737,6 +760,9 @@ struct hnae3_handle { /* Network interface message level enabled bits */ u32 msg_enable; + + unsigned long supported_pflags; + unsigned long priv_flags; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index dc9a85745e62..9d4e9c053a8f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -178,6 +178,8 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) u32 tx_index, rx_index; u32 q_num, value; dma_addr_t addr; + u16 mss_hw_csum; + u32 l234info; int cnt; cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index); @@ -206,26 +208,46 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) tx_desc = &ring->desc[tx_index]; addr = le64_to_cpu(tx_desc->addr); + mss_hw_csum = le16_to_cpu(tx_desc->tx.mss_hw_csum); dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); dev_info(dev, "(TX)addr: %pad\n", &addr); dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag)); dev_info(dev, "(TX)send_size: %u\n", le16_to_cpu(tx_desc->tx.send_size)); - dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); - dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); - dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); - dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); + + if (mss_hw_csum & BIT(HNS3_TXD_HW_CS_B)) { + u32 offset = le32_to_cpu(tx_desc->tx.ol_type_vlan_len_msec); + u32 start = le32_to_cpu(tx_desc->tx.type_cs_vlan_tso_len); + + dev_info(dev, "(TX)csum start: %u\n", + hnae3_get_field(start, + HNS3_TXD_CSUM_START_M, + HNS3_TXD_CSUM_START_S)); + dev_info(dev, "(TX)csum offset: %u\n", + hnae3_get_field(offset, + HNS3_TXD_CSUM_OFFSET_M, + HNS3_TXD_CSUM_OFFSET_S)); + } else { + dev_info(dev, "(TX)vlan_tso: %u\n", + tx_desc->tx.type_cs_vlan_tso); + dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); + dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); + dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); + dev_info(dev, "(TX)vlan_msec: %u\n", + tx_desc->tx.ol_type_vlan_msec); + dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); + dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); + dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); + } + dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.outer_vlan_tag)); dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv)); - dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec); - dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); - dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); - dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); - dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen)); + dev_info(dev, "(TX)paylen_ol4cs: %u\n", + le32_to_cpu(tx_desc->tx.paylen_ol4cs)); dev_info(dev, "(TX)vld_ra_ri: %u\n", le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri)); - dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss)); + dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum); ring = &priv->ring[q_num + h->kinfo.num_tqps]; value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); @@ -233,10 +255,21 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) rx_desc = &ring->desc[rx_index]; addr = le64_to_cpu(rx_desc->addr); + l234info = le32_to_cpu(rx_desc->rx.l234_info); dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); 
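Both the TX fields above and the RX L2 checksum in the hunk that follows are carved out of descriptor words with hnae3_get_field-style mask-then-shift accessors; the 16-bit RX checksum sits split, low byte at bit 4 and high byte at bit 24, and is reassembled as lo | hi << 8. A self-contained model:

#include <stdint.h>
#include <stdio.h>

#define RXD_L2_CSUM_L_S 4
#define RXD_L2_CSUM_L_M (0xffu << RXD_L2_CSUM_L_S)
#define RXD_L2_CSUM_H_S 24
#define RXD_L2_CSUM_H_M (0xffu << RXD_L2_CSUM_H_S)

/* Models hnae3_get_field(): mask first, then shift down. */
#define get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))

int main(void)
{
	uint32_t l234info = (0xabu << RXD_L2_CSUM_H_S) |
			    (0xcdu << RXD_L2_CSUM_L_S);
	uint32_t lo = get_field(l234info, RXD_L2_CSUM_L_M, RXD_L2_CSUM_L_S);
	uint32_t hi = get_field(l234info, RXD_L2_CSUM_H_M, RXD_L2_CSUM_H_S);

	/* reassemble the split 16-bit checksum */
	printf("csum = 0x%04x\n", (unsigned)(lo | hi << 8)); /* 0xabcd */
	return 0;
}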
dev_info(dev, "(RX)addr: %pad\n", &addr); - dev_info(dev, "(RX)l234_info: %u\n", - le32_to_cpu(rx_desc->rx.l234_info)); + dev_info(dev, "(RX)l234_info: %u\n", l234info); + + if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) { + u32 lo, hi; + + lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M, + HNS3_RXD_L2_CSUM_L_S); + hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M, + HNS3_RXD_L2_CSUM_H_S); + dev_info(dev, "(RX)csum: %u\n", lo | hi << 8); + } + dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len)); dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size)); dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash)); @@ -324,6 +357,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h) test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no"); dev_info(&h->pdev->dev, "support INT QL: %s\n", test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no"); + dev_info(&h->pdev->dev, "support HW TX csum: %s\n", + test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no"); + dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n", + test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ? + "yes" : "no"); } static void hns3_dbg_dev_specs(struct hnae3_handle *h) @@ -347,8 +385,10 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h) dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max); + dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl); } static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index a362516a3185..405e49033417 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -211,8 +211,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing */ - if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable && - !tqp_vector->rx_group.coal.gl_adapt_enable) + if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && + !tqp_vector->rx_group.coal.adapt_enable) /* According to the hardware, the range of rl_reg is * 0-59 and the unit is 4. 
*/ @@ -224,61 +224,113 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector, u32 gl_value) { - u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value); + u32 new_val; - writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + if (tqp_vector->rx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); + + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); } void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, u32 gl_value) { - u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value); + u32 new_val; + + if (tqp_vector->tx_group.coal.unit_1us) + new_val = gl_value | HNS3_INT_GL_1US; + else + new_val = hns3_gl_usec_to_reg(gl_value); + + writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); +} + +void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) +{ + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); +} - writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); +void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value) +{ + writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); } -static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, - struct hns3_nic_priv *priv) +static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; + /* initialize the configuration for interrupt coalescing. * 1. GL (Interrupt Gap Limiter) * 2. RL (Interrupt Rate Limiter) + * 3. QL (Interrupt Quantity Limiter) * * Default: enable interrupt coalescing self-adaptive and GL */ - tqp_vector->tx_group.coal.gl_adapt_enable = 1; - tqp_vector->rx_group.coal.gl_adapt_enable = 1; + tx_coal->adapt_enable = 1; + rx_coal->adapt_enable = 1; - tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; - tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; + tx_coal->int_gl = HNS3_INT_GL_50K; + rx_coal->int_gl = HNS3_INT_GL_50K; - tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; + rx_coal->flow_level = HNS3_FLOW_LOW; + tx_coal->flow_level = HNS3_FLOW_LOW; + + /* device version above V3(include V3), GL can configure 1us + * unit, so uses 1us unit. 
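A hedged sketch of the GL encoding introduced here: on V3 and later the value is written in plain microseconds with bit 31 (HNS3_INT_GL_1US) set, while older parts go through hns3_gl_usec_to_reg. The divide-by-two below is an assumption about that conversion for illustration, not a quote of the driver's helper.

#include <stdint.h>
#include <stdio.h>

#define INT_GL_1US (1u << 31)  /* matches HNS3_INT_GL_1US = BIT(31) */

static uint32_t encode_gl(uint32_t usecs, int unit_1us)
{
	/* assumed legacy conversion: register counts 2us ticks */
	return unit_1us ? (usecs | INT_GL_1US) : (usecs >> 1);
}

int main(void)
{
	printf("legacy reg: 0x%x\n", encode_gl(20, 0)); /* 0xa        */
	printf("V3 reg: 0x%x\n", encode_gl(20, 1));     /* 0x80000014 */
	return 0;
}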
+ */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + tx_coal->unit_1us = 1; + rx_coal->unit_1us = 1; + } + + if (ae_dev->dev_specs.int_ql_max) { + tx_coal->ql_enable = 1; + rx_coal->ql_enable = 1; + tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; + tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; + } } -static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector, - struct hns3_nic_priv *priv) +static void +hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector, + struct hns3_nic_priv *priv) { + struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; + struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; struct hnae3_handle *h = priv->ae_handle; - hns3_set_vector_coalesce_tx_gl(tqp_vector, - tqp_vector->tx_group.coal.int_gl); - hns3_set_vector_coalesce_rx_gl(tqp_vector, - tqp_vector->rx_group.coal.int_gl); + hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); + hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); + + if (tx_coal->ql_enable) + hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); + + if (rx_coal->ql_enable) + hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); } static int hns3_nic_set_real_num_queue(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + unsigned int queue_size = kinfo->num_tqps; int i, ret; - if (kinfo->num_tc <= 1) { + if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { netdev_reset_tc(netdev); } else { - ret = netdev_set_num_tc(netdev, kinfo->num_tc); + ret = netdev_set_num_tc(netdev, tc_info->num_tc); if (ret) { netdev_err(netdev, "netdev_set_num_tc fail, ret=%d!\n", ret); @@ -286,13 +338,11 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev) } for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) + if (!test_bit(i, &tc_info->tc_en)) continue; - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); + netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], + tc_info->tqp_offset[i]); } } @@ -318,7 +368,7 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h) u16 alloc_tqps, max_rss_size, rss_size; h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); - rss_size = alloc_tqps / h->kinfo.num_tc; + rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; return min_t(u16, rss_size, max_rss_size); } @@ -457,7 +507,7 @@ static int hns3_nic_net_open(struct net_device *netdev) kinfo = &h->kinfo; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) - netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]); + netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); @@ -644,7 +694,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable) } } -static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs, u16 *mss, u32 *type_cs_vlan_tso) { u32 l4_offset, hdr_len; @@ -674,15 +724,6 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, SKB_GSO_GRE_CSUM | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if ((!(skb_shinfo(skb)->gso_type & - 
SKB_GSO_PARTIAL)) && - (skb_shinfo(skb)->gso_type & - SKB_GSO_UDP_TUNNEL_CSUM)) { - /* Software should clear the udp's checksum - * field when tso is needed. - */ - l4.udp->check = 0; - } /* reset l3&l4 pointers from outer to inner headers */ l3.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); @@ -711,9 +752,13 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, } /* find the txbd field values */ - *paylen = skb->len - hdr_len; + *paylen_fdop_ol4cs = skb->len - hdr_len; hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); + /* offload outer UDP header checksum */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) + hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1); + /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -782,8 +827,16 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); union l4_hdr_info l4; + /* device version above V3(include V3), the hardware can + * do this checksum offload. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + return false; + l4.hdr = skb_transport_header(skb); if (!(!skb->encapsulation && @@ -952,6 +1005,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { struct hnae3_handle *handle = tx_ring->tqp->handle; + struct hnae3_ae_dev *ae_dev; struct vlan_ethhdr *vhdr; int rc; @@ -959,10 +1013,13 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, skb_vlan_tag_present(skb))) return 0; - /* Since HW limitation, if port based insert VLAN enabled, only one VLAN - * header is allowed in skb, otherwise it will cause RAS error. + /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert + * VLAN enabled, only one VLAN header is allowed in skb, otherwise it + * will cause RAS error. 
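The paylen_ol4cs word above packs the GSO payload length together with the outer-UDP-checksum flag; HNS3_TXD_OL4CS_B is bit 22 (defined later in hns3_enet.h), which works because a GSO payload length stays far below 2^22. A toy packing:

#include <stdint.h>
#include <stdio.h>

#define TXD_OL4CS_B 22  /* matches HNS3_TXD_OL4CS_B */

int main(void)
{
	uint32_t paylen_ol4cs = 65226; /* skb->len - hdr_len, example   */
	int tunnel_csum = 1;           /* SKB_GSO_UDP_TUNNEL_CSUM case  */

	/* the length occupies the low bits, so the flag bit can share
	 * the same 32-bit descriptor word */
	if (tunnel_csum)
		paylen_ol4cs |= 1u << TXD_OL4CS_B;
	printf("paylen_ol4cs = 0x%08x\n", paylen_ol4cs);
	return 0;
}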
*/ + ae_dev = pci_get_drvdata(handle->pdev); if (unlikely(skb_vlan_tagged_multi(skb) && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE)) return -EINVAL; @@ -1004,15 +1061,31 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, return 0; } +/* check if the hardware is capable of checksum offloading */ +static bool hns3_check_hw_tx_csum(struct sk_buff *skb) +{ + struct hns3_nic_priv *priv = netdev_priv(skb->dev); + + /* Kindly note, due to backward compatibility of the TX descriptor, + * HW checksum of the non-IP packets and GSO packets is handled at + * different place in the following code + */ + if (skb->csum_not_inet || skb_is_gso(skb) || + !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) + return false; + + return true; +} + static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, struct hns3_desc *desc) { u32 ol_type_vlan_len_msec = 0; + u32 paylen_ol4cs = skb->len; u32 type_cs_vlan_tso = 0; - u32 paylen = skb->len; + u16 mss_hw_csum = 0; u16 inner_vtag = 0; u16 out_vtag = 0; - u16 mss = 0; int ret; ret = hns3_handle_vtags(ring, skb); @@ -1037,6 +1110,17 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, if (skb->ip_summed == CHECKSUM_PARTIAL) { u8 ol4_proto, il4_proto; + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + goto out_hw_tx_csum; + } + skb_reset_mac_len(skb); ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); @@ -1057,7 +1141,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, return ret; } - ret = hns3_set_tso(skb, &paylen, &mss, + ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, &type_cs_vlan_tso); if (unlikely(ret < 0)) { u64_stats_update_begin(&ring->syncp); @@ -1067,12 +1151,13 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, } } +out_hw_tx_csum: /* Set txbd */ desc->tx.ol_type_vlan_len_msec = cpu_to_le32(ol_type_vlan_len_msec); desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); + desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); desc->tx.vlan_tag = cpu_to_le16(inner_vtag); desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); @@ -1583,6 +1668,13 @@ static int hns3_nic_set_features(struct net_device *netdev, h->ae_algo->ops->enable_fd(h, enable); } + if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + h->ae_algo->ops->cls_flower_active(h)) { + netdev_err(netdev, + "there are offloaded TC filters active, cannot disable HW TC offload"); + return -EINVAL; + } + netdev->features = features; return 0; } @@ -1708,7 +1800,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev, static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; struct hnae3_knic_private_info *kinfo; u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; @@ -1731,16 +1822,70 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc); return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc ? 
tc : 1, prio_tc) : -EOPNOTSUPP; + kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; } +static int hns3_setup_tc_cls_flower(struct hns3_nic_priv *priv, + struct flow_cls_offload *flow) +{ + int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); + struct hnae3_handle *h = hns3_get_handle(priv->netdev); + + switch (flow->command) { + case FLOW_CLS_REPLACE: + if (h->ae_algo->ops->add_cls_flower) + return h->ae_algo->ops->add_cls_flower(h, flow, tc); + break; + case FLOW_CLS_DESTROY: + if (h->ae_algo->ops->del_cls_flower) + return h->ae_algo->ops->del_cls_flower(h, flow); + break; + default: + break; + } + + return -EOPNOTSUPP; +} + +static int hns3_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct hns3_nic_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return hns3_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(hns3_block_cb_list); + static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_QDISC_MQPRIO) + struct hns3_nic_priv *priv = netdev_priv(dev); + int ret; + + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + ret = hns3_setup_tc(dev, type_data); + break; + case TC_SETUP_BLOCK: + ret = flow_block_cb_setup_simple(type_data, + &hns3_block_cb_list, + hns3_setup_tc_block_cb, + priv, priv, true); + break; + default: return -EOPNOTSUPP; + } - return hns3_setup_tc(dev, type_data); + return ret; } static int hns3_vlan_rx_add_vid(struct net_device *netdev, @@ -2275,39 +2420,32 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; + NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - netdev->vlan_features |= - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + netdev->vlan_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | - NETIF_F_FRAGLIST; + NETIF_F_SCTP_CRC | 
NETIF_F_FRAGLIST; if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { netdev->hw_features |= NETIF_F_GRO_HW; @@ -2325,6 +2463,30 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->vlan_features |= NETIF_F_GSO_UDP_L4; netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; } + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { + netdev->hw_features |= NETIF_F_HW_CSUM; + netdev->features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; + } else { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + } + + if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + netdev->hw_features |= NETIF_F_HW_TC; + netdev->features |= NETIF_F_HW_TC; + } } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -2747,6 +2909,22 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) return 0; } +static void hns3_checksum_complete(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info) +{ + u32 lo, hi; + + u64_stats_update_begin(&ring->syncp); + ring->stats.csum_complete++; + u64_stats_update_end(&ring->syncp); + skb->ip_summed = CHECKSUM_COMPLETE; + lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M, + HNS3_RXD_L2_CSUM_L_S); + hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M, + HNS3_RXD_L2_CSUM_H_S); + skb->csum = csum_unfold((__force __sum16)(lo | hi << 8)); +} + static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 l234info, u32 bd_base_info, u32 ol_info) { @@ -2761,6 +2939,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; + if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) { + hns3_checksum_complete(ring, skb, l234info); + return; + } + /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; @@ -3333,14 +3516,14 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) tqp_vector->last_jiffies + msecs_to_jiffies(1000))) return; - if (rx_group->coal.gl_adapt_enable) { + if (rx_group->coal.adapt_enable) { rx_update = hns3_get_new_int_gl(rx_group); if (rx_update) hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_group->coal.int_gl); } - if (tx_group->coal.gl_adapt_enable) { + if (tx_group->coal.adapt_enable) { tx_update = hns3_get_new_int_gl(tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, @@ -3536,7 +3719,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; - hns3_vector_gl_rl_init_hw(tqp_vector, priv); + hns3_vector_coalesce_init_hw(tqp_vector, priv); tqp_vector->num_tqps = 0; } @@ -3594,8 +3777,6 @@ map_ring_fail: static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) { -#define HNS3_VECTOR_PF_MAX_NUM 64 - struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_vector_info *vector; @@ -3608,7 +3789,6 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) /* RSS 
size, cpu online and vector_num should be the same */ /* Should consider 2p/4p later */ vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), GFP_KERNEL); @@ -3632,7 +3812,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) tqp_vector->idx = i; tqp_vector->mask_addr = vector[i].io_addr; tqp_vector->vector_irq = vector[i].vector; - hns3_vector_gl_rl_init(tqp_vector, priv); + hns3_vector_coalesce_init(tqp_vector, priv); } out: @@ -3864,21 +4044,20 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; int i; for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; int j; - if (!tc_info->enable) + if (!test_bit(i, &tc_info->tc_en)) continue; - for (j = 0; j < tc_info->tqp_count; j++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { struct hnae3_queue *q; - q = priv->ring[tc_info->tqp_offset + j].tqp; - hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, - tc_info->tc); + q = priv->ring[tc_info->tqp_offset[i] + j].tqp; + hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, i); } } } @@ -4005,7 +4184,8 @@ static void hns3_info_show(struct hns3_nic_priv *priv) dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", + kinfo->tc_info.num_tc); dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } @@ -4109,8 +4289,14 @@ static int hns3_client_init(struct hnae3_handle *handle) /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ netdev->max_mtu = HNS3_MAX_MTU; + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + if (netif_msg_drv(handle)) hns3_info_show(priv); @@ -4570,6 +4756,12 @@ int hns3_set_channels(struct net_device *netdev, if (ch->rx_count || ch->tx_count) return -EINVAL; + if (kinfo->tc_info.mqprio_active) { + dev_err(&netdev->dev, + "it's not allowed to set channels via ethtool when MQPRIO mode is on\n"); + return -EINVAL; + } + if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < 1) { dev_err(&netdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1c81dea0da1e..0a7b606e7c93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -18,6 +18,7 @@ enum hns3_nic_state { HNS3_NIC_STATE_SERVICE_INITED, HNS3_NIC_STATE_SERVICE_SCHED, HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, HNS3_NIC_STATE_MAX }; @@ -82,6 +83,12 @@ enum hns3_nic_state { #define HNS3_RXD_STRP_TAGP_S 13 #define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) +#define HNS3_RXD_L2_CSUM_B 15 +#define HNS3_RXD_L2_CSUM_L_S 4 +#define HNS3_RXD_L2_CSUM_L_M (0xff << HNS3_RXD_L2_CSUM_L_S) +#define HNS3_RXD_L2_CSUM_H_S 24 +#define HNS3_RXD_L2_CSUM_H_M (0xff << HNS3_RXD_L2_CSUM_H_S) + #define 
HNS3_RXD_L2E_B 16 #define HNS3_RXD_L3E_B 17 #define HNS3_RXD_L4E_B 18 @@ -139,6 +146,9 @@ enum hns3_nic_state { #define HNS3_TXD_L4LEN_S 24 #define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) +#define HNS3_TXD_CSUM_START_S 8 +#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S) + #define HNS3_TXD_OL3T_S 0 #define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) #define HNS3_TXD_OVLAN_B 2 @@ -146,6 +156,9 @@ enum hns3_nic_state { #define HNS3_TXD_TUNTYPE_S 4 #define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) +#define HNS3_TXD_CSUM_OFFSET_S 8 +#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S) + #define HNS3_TXD_BDTYPE_S 0 #define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) #define HNS3_TXD_FE_B 4 @@ -159,8 +172,11 @@ enum hns3_nic_state { #define HNS3_TXD_DECTTL_S 12 #define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) +#define HNS3_TXD_OL4CS_B 22 + #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) +#define HNS3_TXD_HW_CS_B 14 #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) @@ -181,6 +197,8 @@ enum hns3_nic_state { #define HNS3_VECTOR_GL2_OFFSET 0x300 #define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_VECTOR_TX_QL_OFFSET 0xe00 +#define HNS3_VECTOR_RX_QL_OFFSET 0xf00 #define HNS3_RING_EN_B 0 @@ -248,9 +266,9 @@ struct __packed hns3_desc { }; }; - __le32 paylen; + __le32 paylen_ol4cs; __le16 bdtp_fe_sc_vld_ra_ri; - __le16 mss; + __le16 mss_hw_csum; } tx; struct { @@ -369,6 +387,7 @@ struct ring_stats { u64 err_bd_num; u64 l2_err; u64 l3l4_csum_err; + u64 csum_complete; u64 rx_multicast; u64 non_reuse_pg; }; @@ -418,18 +437,25 @@ enum hns3_flow_level_range { HNS3_FLOW_ULTRA = 3, }; -#define HNS3_INT_GL_MAX 0x1FE0 #define HNS3_INT_GL_50K 0x0014 #define HNS3_INT_GL_20K 0x0032 #define HNS3_INT_GL_18K 0x0036 #define HNS3_INT_GL_8K 0x007C +#define HNS3_INT_GL_1US BIT(31) + #define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_ENABLE_MASK 0x40 +#define HNS3_INT_QL_DEFAULT_CFG 0x20 + struct hns3_enet_coalesce { u16 int_gl; - u8 gl_adapt_enable; + u16 int_ql; + u16 int_ql_max; + u8 adapt_enable:1; + u8 ql_enable:1; + u8 unit_1us:1; enum hns3_flow_level_range flow_level; }; @@ -595,6 +621,10 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector, u32 gl_value); void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector, u32 rl_value); +void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value); +void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, + u32 ql_value); void hns3_enable_vlan_filter(struct net_device *netdev, bool enable); void hns3_request_update_promisc_mode(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6b07b2771172..e2fc443fe92c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -18,6 +18,11 @@ struct hns3_sfp_type { u8 ext_type; }; +struct hns3_pflag_desc { + char name[ETH_GSTRING_LEN]; + void (*handler)(struct net_device *netdev, bool enable); +}; + /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ @@ -55,10 +60,13 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("err_bd_num", err_bd_num), HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("csum_complete", csum_complete), HNS3_TQP_STAT("multicast", 
rx_multicast), HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), }; +#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags) + #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) @@ -394,6 +402,23 @@ static void hns3_self_test(struct net_device *ndev, netif_dbg(h, drv, ndev, "self test end\n"); } +static void hns3_update_limit_promisc_mode(struct net_device *netdev, + bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (enable) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + + hns3_request_update_promisc_mode(handle); +} + +static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = { + { "limit_promisc", hns3_update_limit_promisc_mode } +}; + static int hns3_get_sset_count(struct net_device *netdev, int stringset) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -410,6 +435,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) case ETH_SS_TEST: return ops->get_sset_count(h, stringset); + case ETH_SS_PRIV_FLAGS: + return HNAE3_PFLAG_MAX; + default: return -EOPNOTSUPP; } @@ -463,6 +491,7 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; char *buff = (char *)data; + int i; if (!ops->get_strings) return; @@ -475,6 +504,13 @@ static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_TEST: ops->get_strings(h, stringset, data); break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) { + snprintf(buff, ETH_GSTRING_LEN, "%s", + hns3_priv_flags[i].name); + buff += ETH_GSTRING_LEN; + } + break; default: break; } @@ -1105,9 +1141,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, rx_vector = priv->ring[queue_num + queue].tqp_vector; cmd->use_adaptive_tx_coalesce = - tx_vector->tx_group.coal.gl_adapt_enable; + tx_vector->tx_group.coal.adapt_enable; cmd->use_adaptive_rx_coalesce = - rx_vector->rx_group.coal.gl_adapt_enable; + rx_vector->rx_group.coal.adapt_enable; cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl; cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl; @@ -1115,6 +1151,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting; cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting; + cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql; + cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql; + return 0; } @@ -1127,22 +1166,30 @@ static int hns3_get_coalesce(struct net_device *netdev, static int hns3_check_gl_coalesce_para(struct net_device *netdev, struct ethtool_coalesce *cmd) { + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); u32 rx_gl, tx_gl; - if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) { + if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { netdev_err(netdev, - "Invalid rx-usecs value, rx-usecs range is 0-%d\n", - HNS3_INT_GL_MAX); + "invalid rx-usecs value, rx-usecs range is 0-%u\n", + ae_dev->dev_specs.max_int_gl); return -EINVAL; } - if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) { + if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) { netdev_err(netdev, - "Invalid tx-usecs value, tx-usecs range is 0-%d\n", - HNS3_INT_GL_MAX); + "invalid tx-usecs 
value, tx-usecs range is 0-%u\n", + ae_dev->dev_specs.max_int_gl); return -EINVAL; } + /* device version above V3(include V3), GL uses 1us unit, + * so the round down is not needed. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + return 0; + rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); if (rx_gl != cmd->rx_coalesce_usecs) { netdev_info(netdev, @@ -1188,6 +1235,29 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev, return 0; } +static int hns3_check_ql_coalesce_param(struct net_device *netdev, + struct ethtool_coalesce *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + + if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) && + !ae_dev->dev_specs.int_ql_max) { + netdev_err(netdev, "coalesced frames is not supported\n"); + return -EOPNOTSUPP; + } + + if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max || + cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) { + netdev_err(netdev, + "invalid coalesced_frames value, range is 0-%u\n", + ae_dev->dev_specs.int_ql_max); + return -ERANGE; + } + + return 0; +} + static int hns3_check_coalesce_para(struct net_device *netdev, struct ethtool_coalesce *cmd) { @@ -1207,6 +1277,10 @@ static int hns3_check_coalesce_para(struct net_device *netdev, return ret; } + ret = hns3_check_ql_coalesce_param(netdev, cmd); + if (ret) + return ret; + if (cmd->use_adaptive_tx_coalesce == 1 || cmd->use_adaptive_rx_coalesce == 1) { netdev_info(netdev, @@ -1230,14 +1304,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, tx_vector = priv->ring[queue].tqp_vector; rx_vector = priv->ring[queue_num + queue].tqp_vector; - tx_vector->tx_group.coal.gl_adapt_enable = + tx_vector->tx_group.coal.adapt_enable = cmd->use_adaptive_tx_coalesce; - rx_vector->rx_group.coal.gl_adapt_enable = + rx_vector->rx_group.coal.adapt_enable = cmd->use_adaptive_rx_coalesce; tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs; rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs; + tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames; + rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames; + hns3_set_vector_coalesce_tx_gl(tx_vector, tx_vector->tx_group.coal.int_gl); hns3_set_vector_coalesce_rx_gl(rx_vector, @@ -1245,6 +1322,13 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting); hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting); + + if (tx_vector->tx_group.coal.ql_enable) + hns3_set_vector_coalesce_tx_ql(tx_vector, + tx_vector->tx_group.coal.int_ql); + if (rx_vector->rx_group.coal.ql_enable) + hns3_set_vector_coalesce_rx_ql(rx_vector, + rx_vector->rx_group.coal.int_ql); } static int hns3_set_coalesce(struct net_device *netdev, @@ -1468,10 +1552,58 @@ static int hns3_get_module_eeprom(struct net_device *netdev, return ops->get_module_eeprom(handle, ee->offset, ee->len, data); } +static u32 hns3_get_priv_flags(struct net_device *netdev) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + return handle->priv_flags; +} + +static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed) +{ + u32 i; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) + if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) { + netdev_err(h->netdev, "%s is unsupported\n", + hns3_priv_flags[i].name); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + 
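Note: hns3_check_ql_coalesce_param above rejects any tx/rx_max_coalesced_frames request when the firmware reports int_ql_max as zero (no QL capability), and otherwise range-checks both values against it. A standalone sketch of the same policy, with errno-style returns matching the driver's choices:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* int_ql_max == 0 means the device reported no QL capability */
static int check_ql_param(uint32_t tx_frames, uint32_t rx_frames,
			  uint16_t int_ql_max)
{
	if ((tx_frames || rx_frames) && !int_ql_max)
		return -EOPNOTSUPP;    /* coalesced frames not supported */

	if (tx_frames > int_ql_max || rx_frames > int_ql_max)
		return -ERANGE;        /* valid range is 0..int_ql_max */

	return 0;
}

int main(void)
{
	printf("%d\n", check_ql_param(16, 16, 0));     /* unsupported */
	printf("%d\n", check_ql_param(1024, 16, 512)); /* out of range */
	printf("%d\n", check_ql_param(16, 16, 512));   /* accepted */
	return 0;
}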
struct hnae3_handle *handle = hns3_get_handle(netdev); + u32 changed = pflags ^ handle->priv_flags; + int ret; + u32 i; + + ret = hns3_check_priv_flags(handle, changed); + if (ret) + return ret; + + for (i = 0; i < HNAE3_PFLAG_MAX; i++) { + if (changed & BIT(i)) { + bool enable = !(handle->priv_flags & BIT(i)); + + if (enable) + handle->priv_flags |= BIT(i); + else + handle->priv_flags &= ~BIT(i); + hns3_priv_flags[i].handler(netdev, enable); + } + } + + return 0; +} + #define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \ ETHTOOL_COALESCE_USE_ADAPTIVE | \ ETHTOOL_COALESCE_RX_USECS_HIGH | \ - ETHTOOL_COALESCE_TX_USECS_HIGH) + ETHTOOL_COALESCE_TX_USECS_HIGH | \ + ETHTOOL_COALESCE_MAX_FRAMES) static const struct ethtool_ops hns3vf_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, @@ -1497,6 +1629,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_link = hns3_get_link, .get_msglevel = hns3_get_msglevel, .set_msglevel = hns3_set_msglevel, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1533,6 +1667,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .set_fecparam = hns3_set_fecparam, .get_module_info = hns3_get_module_info, .get_module_eeprom = hns3_get_module_eeprom, + .get_priv_flags = hns3_get_priv_flags, + .set_priv_flags = hns3_set_priv_flags, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index e6321dda0f3f..b728be4737f8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -355,6 +355,12 @@ static void hclge_parse_capability(struct hclge_dev *hdev, set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B)) set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B)) + set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) + set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) + set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); } static enum hclge_cmd_status diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 096e26a2e16b..edfadb5cb1c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -307,6 +307,9 @@ enum hclge_opcode_type { #define HCLGE_TQP_REG_OFFSET 0x80000 #define HCLGE_TQP_REG_SIZE 0x200 +#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024 +#define HCLGE_TQP_EXT_REG_OFFSET 0x100 + #define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 #define HCLGE_RCB_INIT_FLAG_EN_B 0 #define HCLGE_RCB_INIT_FLAG_FINI_B 8 @@ -336,7 +339,9 @@ enum hclge_int_type { }; struct hclge_ctrl_vector_chain_cmd { - u8 int_vector_id; +#define HCLGE_VECTOR_ID_L_S 0 +#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0) + u8 int_vector_id_l; u8 int_cause_num; #define HCLGE_INT_TYPE_S 0 #define HCLGE_INT_TYPE_M GENMASK(1, 0) @@ -346,7 +351,9 @@ struct hclge_ctrl_vector_chain_cmd { #define HCLGE_INT_GL_IDX_M GENMASK(14, 13) __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; u8 vfid; - u8 rsv; +#define HCLGE_VECTOR_ID_H_S 8 +#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8) + u8 int_vector_id_h; }; #define HCLGE_MAX_TC_NUM 8 @@ -369,12 +376,13 
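Note: hns3_set_priv_flags computes the changed bit set once with XOR, rejects any changed bit outside supported_pflags, then flips each remaining bit and invokes its handler with the new state. A compact sketch of that check-then-dispatch pattern (the handler table and flag names are illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

#define PFLAG_MAX 2

static void limit_promisc_handler(int idx, int enable)
{
	printf("flag %d -> %s\n", idx, enable ? "on" : "off");
}

static void (* const handlers[PFLAG_MAX])(int, int) = {
	limit_promisc_handler,   /* bit 0: "limit_promisc" */
	limit_promisc_handler,   /* bit 1: placeholder */
};

static int set_priv_flags(uint32_t *cur, uint32_t req, uint32_t supported)
{
	uint32_t changed = *cur ^ req;
	int i;

	if (changed & ~supported)
		return -1;       /* the driver returns -EOPNOTSUPP here */

	for (i = 0; i < PFLAG_MAX; i++) {
		if (changed & (1U << i)) {
			int enable = !(*cur & (1U << i));

			if (enable)
				*cur |= 1U << i;
			else
				*cur &= ~(1U << i);
			handlers[i](i, enable);
		}
	}
	return 0;
}

int main(void)
{
	uint32_t flags = 0;

	set_priv_flags(&flags, 0x1, 0x1);  /* enable bit 0 */
	set_priv_flags(&flags, 0x0, 0x1);  /* disable it again */
	return 0;
}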
@@ enum HCLGE_CAP_BITS { HCLGE_CAP_FD_FORWARD_TC_B, HCLGE_CAP_PTP_B, HCLGE_CAP_INT_QL_B, - HCLGE_CAP_SIMPLE_BD_B, + HCLGE_CAP_HW_TX_CSUM_B, HCLGE_CAP_TX_PUSH_B, HCLGE_CAP_PHY_IMP_B, HCLGE_CAP_TQP_TXRX_INDEP_B, HCLGE_CAP_HW_PAD_B, HCLGE_CAP_STASH_B, + HCLGE_CAP_UDP_TUNNEL_CSUM_B, }; #define HCLGE_QUERY_CAP_LENGTH 3 @@ -470,16 +478,13 @@ struct hclge_pf_res_cmd { __le16 tqp_num; __le16 buf_size; __le16 msixcap_localid_ba_nic; - __le16 msixcap_localid_ba_rocee; -#define HCLGE_MSIX_OFT_ROCEE_S 0 -#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0) -#define HCLGE_PF_VEC_NUM_S 0 -#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) - __le16 pf_intr_vector_number; + __le16 msixcap_localid_number_nic; + __le16 pf_intr_vector_number_roce; __le16 pf_own_fun_number; __le16 tx_buf_size; __le16 dv_buf_size; - __le32 rsv[2]; + __le16 ext_tqp_num; + u8 rsv[6]; }; #define HCLGE_CFG_OFFSET_S 0 @@ -513,6 +518,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10) #define HCLGE_CFG_UMV_TBL_SPACE_S 16 #define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) +#define HCLGE_CFG_PF_RSS_SIZE_S 0 +#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0) #define HCLGE_CFG_CMD_CNT 4 @@ -553,18 +560,23 @@ struct hclge_rss_input_tuple_cmd { }; #define HCLGE_RSS_CFG_TBL_SIZE 16 +#define HCLGE_RSS_CFG_TBL_SIZE_H 4 +#define HCLGE_RSS_CFG_TBL_BW_H 2U +#define HCLGE_RSS_CFG_TBL_BW_L 8U struct hclge_rss_indirection_table_cmd { __le16 start_table_index; __le16 rss_set_bitmap; - u8 rsv[4]; - u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; + u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H]; + u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE]; }; #define HCLGE_RSS_TC_OFFSET_S 0 -#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0) +#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0) +#define HCLGE_RSS_TC_SIZE_MSB_B 11 #define HCLGE_RSS_TC_SIZE_S 12 #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) +#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3 #define HCLGE_RSS_TC_VALID_B 15 struct hclge_rss_tc_mode_cmd { __le16 rss_tc_mode[HCLGE_MAX_TC_NUM]; @@ -578,23 +590,26 @@ struct hclge_link_status_cmd { u8 rsv[23]; }; -struct hclge_promisc_param { - u8 vf_id; - u8 enable; -}; +/* for DEVICE_VERSION_V1/2, reference to promisc cmd byte8 */ +#define HCLGE_PROMISC_EN_UC 1 +#define HCLGE_PROMISC_EN_MC 2 +#define HCLGE_PROMISC_EN_BC 3 +#define HCLGE_PROMISC_TX_EN 4 +#define HCLGE_PROMISC_RX_EN 5 + +/* for DEVICE_VERSION_V3, reference to promisc cmd byte10 */ +#define HCLGE_PROMISC_UC_RX_EN 2 +#define HCLGE_PROMISC_MC_RX_EN 3 +#define HCLGE_PROMISC_BC_RX_EN 4 +#define HCLGE_PROMISC_UC_TX_EN 5 +#define HCLGE_PROMISC_MC_TX_EN 6 +#define HCLGE_PROMISC_BC_TX_EN 7 -#define HCLGE_PROMISC_TX_EN_B BIT(4) -#define HCLGE_PROMISC_RX_EN_B BIT(5) -#define HCLGE_PROMISC_EN_B 1 -#define HCLGE_PROMISC_EN_ALL 0x7 -#define HCLGE_PROMISC_EN_UC 0x1 -#define HCLGE_PROMISC_EN_MC 0x2 -#define HCLGE_PROMISC_EN_BC 0x4 struct hclge_promisc_cfg_cmd { - u8 flag; + u8 promisc; u8 vf_id; - __le16 rsv0; - u8 rsv1[20]; + u8 extend_promisc; + u8 rsv0[21]; }; enum hclge_promisc_type { @@ -643,7 +658,6 @@ struct hclge_config_mac_speed_dup_cmd { u8 rsv[22]; }; -#define HCLGE_RING_ID_MASK GENMASK(9, 0) #define HCLGE_TQP_ENABLE_B 0 #define HCLGE_MAC_CFG_AN_EN_B 0 @@ -818,6 +832,7 @@ enum hclge_mac_vlan_cfg_sel { #define HCLGE_CFG_NIC_ROCE_SEL_B 4 #define HCLGE_ACCEPT_TAG2_B 5 #define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_TAG_SHIFT_MODE_EN_B 7 #define HCLGE_VF_NUM_PER_BYTE 8 struct hclge_vport_vtag_tx_cfg_cmd { @@ -834,6 +849,8 @@ struct hclge_vport_vtag_tx_cfg_cmd { #define HCLGE_REM_TAG2_EN_B 1 #define HCLGE_SHOW_TAG1_EN_B 2 #define 
HCLGE_SHOW_TAG2_EN_B 3 +#define HCLGE_DISCARD_TAG1_EN_B 5 +#define HCLGE_DISCARD_TAG2_EN_B 6 struct hclge_vport_vtag_rx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; @@ -1041,6 +1058,9 @@ struct hclge_fd_tcam_config_3_cmd { #define HCLGE_FD_AD_WR_RULE_ID_B 0 #define HCLGE_FD_AD_RULE_ID_S 1 #define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1) +#define HCLGE_FD_AD_TC_OVRD_B 16 +#define HCLGE_FD_AD_TC_SIZE_S 17 +#define HCLGE_FD_AD_TC_SIZE_M GENMASK(20, 17) struct hclge_fd_ad_config_cmd { u8 stage; @@ -1103,6 +1123,14 @@ struct hclge_dev_specs_0_cmd { __le32 max_tm_rate; }; +#define HCLGE_DEF_MAX_INT_GL 0x1FE0U + +struct hclge_dev_specs_1_cmd { + __le32 rsv0; + __le16 max_int_gl; + u8 rsv1[18]; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 3606240025a8..e08d11b8ecf1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -4,7 +4,6 @@ #include "hclge_main.h" #include "hclge_dcb.h" #include "hclge_tm.h" -#include "hclge_dcb.h" #include "hnae3.h" #define BW_PERCENT 100 @@ -398,32 +397,130 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) return 0; } +static int hclge_mqprio_qopt_check(struct hclge_dev *hdev, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + u16 queue_sum = 0; + int ret; + int i; + + if (!mqprio_qopt->qopt.num_tc) { + mqprio_qopt->qopt.num_tc = 1; + return 0; + } + + ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc, + mqprio_qopt->qopt.prio_tc_map); + if (ret) + return ret; + + for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) { + if (!is_power_of_2(mqprio_qopt->qopt.count[i])) { + dev_err(&hdev->pdev->dev, + "qopt queue count must be power of 2\n"); + return -EINVAL; + } + + if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) { + dev_err(&hdev->pdev->dev, + "qopt queue count should be no more than %u\n", + hdev->pf_rss_size_max); + return -EINVAL; + } + + if (mqprio_qopt->qopt.offset[i] != queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue offset must start from 0, and being continuous\n"); + return -EINVAL; + } + + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) { + dev_err(&hdev->pdev->dev, + "qopt tx_rate is not supported\n"); + return -EOPNOTSUPP; + } + + queue_sum = mqprio_qopt->qopt.offset[i]; + queue_sum += mqprio_qopt->qopt.count[i]; + } + if (hdev->vport[0].alloc_tqps < queue_sum) { + dev_err(&hdev->pdev->dev, + "qopt queue count sum should be less than %u\n", + hdev->vport[0].alloc_tqps); + return -EINVAL; + } + + return 0; +} + +static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + int i; + + memset(tc_info, 0, sizeof(*tc_info)); + tc_info->num_tc = mqprio_qopt->qopt.num_tc; + memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, + sizeof_field(struct hnae3_tc_info, prio_tc)); + memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count, + sizeof_field(struct hnae3_tc_info, tqp_count)); + memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, + sizeof_field(struct hnae3_tc_info, tqp_offset)); + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + set_bit(tc_info->prio_tc[i], &tc_info->tc_en); +} + +static int hclge_config_tc(struct hclge_dev *hdev, + struct hnae3_tc_info *tc_info) +{ + int i; + + hclge_tm_schd_info_update(hdev, tc_info->num_tc); + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) + 
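Note: hclge_mqprio_qopt_check enforces four properties on the requested TC layout: each TC's queue count is a power of two, no count exceeds pf_rss_size_max, the offsets are contiguous starting at 0, and the total fits into the vport's allocated TQPs. A self-contained sketch of the same validation (the limits in main() are made-up examples):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_pow2(uint16_t v) { return v && !(v & (v - 1)); }

static int check_mqprio(const uint16_t *count, const uint16_t *offset,
			int num_tc, uint16_t rss_max, uint16_t alloc_tqps)
{
	uint16_t queue_sum = 0;
	int i;

	for (i = 0; i < num_tc; i++) {
		if (!is_pow2(count[i]))
			return -1;    /* count must be a power of 2 */
		if (count[i] > rss_max)
			return -1;    /* bounded by pf_rss_size_max */
		if (offset[i] != queue_sum)
			return -1;    /* offsets contiguous from 0 */
		queue_sum += count[i];
	}
	return queue_sum <= alloc_tqps ? 0 : -1;
}

int main(void)
{
	uint16_t count[] = { 4, 4, 8 }, offset[] = { 0, 4, 8 };

	printf("%d\n", check_mqprio(count, offset, 3, 16, 32)); /* 0: ok */
	return 0;
}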
hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i]; + + return hclge_map_update(hdev); +} + /* Set up TC for hardware offloaded mqprio in channel mode */ -static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) +static int hclge_setup_tc(struct hnae3_handle *h, + struct tc_mqprio_qopt_offload *mqprio_qopt) { struct hclge_vport *vport = hclge_get_vport(h); + struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info old_tc_info; + u8 tc = mqprio_qopt->qopt.num_tc; int ret; + /* if client unregistered, it's not allowed to change + * mqprio configuration, which may cause uninit ring + * fail. + */ + if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) + return -EBUSY; + if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) return -EINVAL; - ret = hclge_dcb_common_validate(hdev, tc, prio_tc); - if (ret) - return -EINVAL; + ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check mqprio qopt params, ret = %d\n", ret); + return ret; + } ret = hclge_notify_down_uinit(hdev); if (ret) return ret; - hclge_tm_schd_info_update(hdev, tc); - hclge_tm_prio_tc_info_update(hdev, prio_tc); - - ret = hclge_tm_init_hw(hdev, false); - if (ret) - goto err_out; + kinfo = &vport->nic.kinfo; + memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info)); + hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt); + kinfo->tc_info.mqprio_active = tc > 0; - ret = hclge_client_setup_tc(hdev); + ret = hclge_config_tc(hdev, &kinfo->tc_info); if (ret) goto err_out; @@ -437,6 +534,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) return hclge_notify_init_up(hdev); err_out: + /* roll-back */ + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); + if (hclge_config_tc(hdev, &kinfo->tc_info)) + dev_err(&hdev->pdev->dev, + "failed to roll back tc configuration\n"); + hclge_notify_init_up(hdev); return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 16df050e72cf..8f6dea5198cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -498,6 +498,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); + dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag); + dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n", + le32_to_cpu(pg_shap_cfg_cmd->pg_rate)); cmd = HCLGE_OPC_TM_PORT_SHAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -508,6 +511,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); + dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag); + dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n", + le32_to_cpu(port_shap_cfg_cmd->port_rate)); cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -655,6 +661,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); + dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", 
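Note: hclge_setup_tc snapshots kinfo->tc_info before applying the new mqprio mapping; if programming fails, the snapshot is copied back and re-applied so software state does not drift from hardware. The general save/apply/roll-back shape, reduced to a toy example:

#include <stdio.h>
#include <string.h>

struct tc_info { int num_tc; /* ... */ };

static int apply(const struct tc_info *ti)
{
	return ti->num_tc <= 8 ? 0 : -1;   /* pretend more than 8 TCs fails */
}

static int setup_tc(struct tc_info *live, const struct tc_info *new_cfg)
{
	struct tc_info old;

	memcpy(&old, live, sizeof(old));   /* snapshot before touching it */
	memcpy(live, new_cfg, sizeof(*live));

	if (apply(live)) {
		memcpy(live, &old, sizeof(*live)); /* roll back */
		if (apply(live))
			fprintf(stderr, "failed to roll back tc configuration\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct tc_info live = { 1 }, bad = { 9 }, good = { 4 };

	setup_tc(&live, &bad);    /* fails, live stays at 1 TC */
	setup_tc(&live, &good);   /* succeeds */
	printf("num_tc=%d\n", live.num_tc);
	return 0;
}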
shap_cfg_cmd->flag); + dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n", + le32_to_cpu(shap_cfg_cmd->pri_rate)); cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -666,6 +675,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); + dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag); + dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n", + le32_to_cpu(shap_cfg_cmd->pri_rate)); hclge_dbg_dump_tm_pg(hdev); @@ -681,14 +693,17 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, { struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; + u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM]; struct hclge_qs_to_pri_link_cmd *map; struct hclge_tqp_tx_queue_tc_cmd *tc; enum hclge_opcode_type cmd; struct hclge_desc desc; int queue_id, group_id; - u32 qset_mapping[32]; int tc_id, qset_id; int pri_id, ret; + u16 qs_id_l; + u16 qs_id_h; + u8 grp_num; u32 i; ret = kstrtouint(cmd_buf, 0, &queue_id); @@ -701,7 +716,24 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) goto err_tm_map_cmd_send; - qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF; + qset_id = le16_to_cpu(nq_to_qs_map->qset_id); + + /* convert qset_id to the following format, drop the vld bit + * | qs_id_h | vld | qs_id_l | + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * \ \ / / + * \ \ / / + * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 | + */ + qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK, + HCLGE_TM_QS_ID_H_EXT_S); + qset_id = 0; + hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S, + qs_id_h); cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; map = (struct hclge_qs_to_pri_link_cmd *)desc.data; @@ -731,9 +763,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, return; } + grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ? 
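Note: the comment block above documents the qset_id rewrite: the raw register value carries a valid bit at bit 10 between the low 10 bits and the high 5 bits of the id, and the debugfs code squeezes the two id parts back together. The same conversion as a plain function, with field positions taken from the comment and the driver's mask names dropped:

#include <stdint.h>
#include <stdio.h>

/* raw:    | qs_id_h 15..11 | vld 10 | qs_id_l 9..0 |
 * packed: |      qs_id_h 14..10     | qs_id_l 9..0 |
 */
static uint16_t qset_id_drop_vld(uint16_t raw)
{
	uint16_t qs_id_l = raw & 0x3ff;         /* bits 9..0  */
	uint16_t qs_id_h = (raw >> 11) & 0x1f;  /* bits 15..11 */

	return (uint16_t)((qs_id_h << 10) | qs_id_l);
}

int main(void)
{
	/* vld set, qs_id_h = 1, qs_id_l = 5 -> 0x405 */
	printf("%#x\n", (unsigned)qset_id_drop_vld((1u << 11) | (1u << 10) | 5));
	return 0;
}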
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM; cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; - for (group_id = 0; group_id < 32; group_id++) { + for (group_id = 0; group_id < grp_num; group_id++) { hclge_cmd_setup_basic_desc(&desc, cmd, true); bp_to_qs_map_cmd->tc_id = tc_id; bp_to_qs_map_cmd->qs_group_id = group_id; @@ -748,7 +782,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); i = 0; - for (group_id = 0; group_id < 4; group_id++) { + for (group_id = 0; group_id < grp_num / 8; group_id++) { dev_info(&hdev->pdev->dev, "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", group_id * 256, qset_mapping[(u32)(i + 7)], @@ -1379,6 +1413,7 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) u8 ir_u, ir_b, ir_s, bs_b, bs_s; struct hclge_desc desc; u32 shapping_para; + u32 rate; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); @@ -1400,10 +1435,11 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) ir_s = hclge_tm_get_field(shapping_para, IR_S); bs_b = hclge_tm_get_field(shapping_para, BS_B); bs_s = hclge_tm_get_field(shapping_para, BS_S); + rate = le32_to_cpu(shap_cfg_cmd->qs_rate); dev_info(&hdev->pdev->dev, - "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n", - qsid, ir_b, ir_u, ir_s, bs_b, bs_s); + "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n", + qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate); } static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) @@ -1418,7 +1454,7 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { u16 qsid = vport->qs_offset + i; hclge_dbg_dump_qs_shaper_single(hdev, qsid); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1f026408ad38..e6f37f91c489 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -556,7 +556,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS, true); - desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); + desc[0].data[0] = cpu_to_le32(tqp->index); ret = hclge_cmd_send(&hdev->hw, desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -576,7 +576,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) HCLGE_OPC_QUERY_TX_STATS, true); - desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); + desc[0].data[0] = cpu_to_le32(tqp->index); ret = hclge_cmd_send(&hdev->hw, desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -886,7 +886,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) } req = (struct hclge_pf_res_cmd *)desc.data; - hdev->num_tqps = le16_to_cpu(req->tqp_num); + hdev->num_tqps = le16_to_cpu(req->tqp_num) + + le16_to_cpu(req->ext_tqp_num); hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; if (req->tx_buf_size) @@ -905,35 +906,24 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); + hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); + if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { + dev_err(&hdev->pdev->dev, + "only %u msi resources available, not 
enough for pf(min:2).\n", + hdev->num_nic_msi); + return -EINVAL; + } + if (hnae3_dev_roce_supported(hdev)) { - hdev->roce_base_msix_offset = - hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), - HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S); hdev->num_roce_msi = - hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); - - /* nic's msix numbers is always equals to the roce's. */ - hdev->num_nic_msi = hdev->num_roce_msi; + le16_to_cpu(req->pf_intr_vector_number_roce); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors. */ - hdev->num_msi = hdev->num_roce_msi + - hdev->roce_base_msix_offset; + hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; } else { - hdev->num_msi = - hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number), - HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); - - hdev->num_nic_msi = hdev->num_msi; - } - - if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { - dev_err(&hdev->pdev->dev, - "Just %u msi resources, not enough for pf(min:2).\n", - hdev->num_nic_msi); - return -EINVAL; + hdev->num_msi = hdev->num_nic_msi; } return 0; @@ -1295,9 +1285,9 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]), HCLGE_CFG_DEFAULT_SPEED_M, HCLGE_CFG_DEFAULT_SPEED_S); - cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), - HCLGE_CFG_RSS_SIZE_M, - HCLGE_CFG_RSS_SIZE_S); + cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1318,6 +1308,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) HCLGE_CFG_UMV_TBL_SPACE_S); if (!cfg->umv_space) cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; + + cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), + HCLGE_CFG_PF_RSS_SIZE_M, + HCLGE_CFG_PF_RSS_SIZE_S); + + /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a + * power of 2, instead of reading out directly. This would + * be more flexible for future changes and expansions. + * When VF max rss size field is HCLGE_CFG_RSS_SIZE_S, + * it does not make sense if PF's field is 0. In this case, PF and VF + * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S. + */ + cfg->pf_rss_size_max = cfg->pf_rss_size_max ? 
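Note: per the comment in hclge_parse_cfg, the 4-bit PF RSS field stores log2 of the maximum RSS size so a small field can describe large queue counts, and a zero field means "same as the VF maximum". The decode in isolation:

#include <stdint.h>
#include <stdio.h>

/* field is log2(max rss size); 0 falls back to the VF maximum */
static uint16_t decode_pf_rss_size_max(uint8_t field, uint16_t vf_rss_size_max)
{
	return field ? (uint16_t)(1U << field) : vf_rss_size_max;
}

int main(void)
{
	printf("%u\n", decode_pf_rss_size_max(9, 256)); /* 512 */
	printf("%u\n", decode_pf_rss_size_max(0, 256)); /* 256 (fallback) */
	return 0;
}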
+ 1U << cfg->pf_rss_size_max : + cfg->vf_rss_size_max; } /* hclge_get_cfg: query the static parameter from flash @@ -1366,6 +1371,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE; ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE; ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE; + ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; } static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@ -1373,14 +1379,18 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclge_dev_specs_0_cmd *req0; + struct hclge_dev_specs_1_cmd *req1; req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data; ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; ae_dev->dev_specs.rss_ind_tbl_size = le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -1395,6 +1405,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE; if (!dev_specs->max_tm_rate) dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; } static int hclge_query_dev_specs(struct hclge_dev *hdev) @@ -1472,7 +1484,8 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->num_vmdq_vport = cfg.vmdq_vport_num; hdev->base_tqp_pid = 0; - hdev->rss_size_max = cfg.rss_size_max; + hdev->vf_rss_size_max = cfg.vf_rss_size_max; + hdev->pf_rss_size_max = cfg.pf_rss_size_max; hdev->rx_buf_len = cfg.rx_buf_len; ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; @@ -1591,8 +1604,20 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) tqp->q.buf_size = hdev->rx_buf_len; tqp->q.tx_desc_num = hdev->num_tx_desc; tqp->q.rx_desc_num = hdev->num_rx_desc; - tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + - i * HCLGE_TQP_REG_SIZE; + + /* need an extended offset to configure queues >= + * HCLGE_TQP_MAX_SIZE_DEV_V2 + */ + if (i < HCLGE_TQP_MAX_SIZE_DEV_V2) + tqp->q.io_base = hdev->hw.io_base + + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + else + tqp->q.io_base = hdev->hw.io_base + + HCLGE_TQP_REG_OFFSET + + HCLGE_TQP_EXT_REG_OFFSET + + (i - HCLGE_TQP_MAX_SIZE_DEV_V2) * + HCLGE_TQP_REG_SIZE; tqp++; } @@ -1643,7 +1668,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) } } vport->alloc_tqps = alloced; - kinfo->rss_size = min_t(u16, hdev->rss_size_max, + kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, vport->alloc_tqps / hdev->tm_info.num_tc); /* ensure one to one mapping between irq and queue at default */ @@ -2405,17 +2430,18 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) { struct hnae3_handle *roce = &vport->roce; struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; roce->rinfo.num_vectors = vport->back->num_roce_msi; - if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || - vport->back->num_msi_left == 0) + if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) return -EINVAL; - roce->rinfo.base_vector = vport->back->roce_base_vector; + roce->rinfo.base_vector = 
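Note: hclge_alloc_tqps now splits the queue register space: queues below HCLGE_TQP_MAX_SIZE_DEV_V2 keep the original layout, while higher queues index from zero again inside an extended window shifted by HCLGE_TQP_EXT_REG_OFFSET. The offset arithmetic, extracted into a testable function with constants copied from hclge_cmd.h above:

#include <stdint.h>
#include <stdio.h>

#define TQP_REG_OFFSET       0x80000u
#define TQP_REG_SIZE         0x200u
#define TQP_EXT_REG_OFFSET   0x100u
#define TQP_MAX_SIZE_DEV_V2  1024u

static uint32_t tqp_reg_base(uint32_t i)
{
	if (i < TQP_MAX_SIZE_DEV_V2)
		return TQP_REG_OFFSET + i * TQP_REG_SIZE;

	return TQP_REG_OFFSET + TQP_EXT_REG_OFFSET +
	       (i - TQP_MAX_SIZE_DEV_V2) * TQP_REG_SIZE;
}

int main(void)
{
	printf("%#x %#x %#x\n", (unsigned)tqp_reg_base(0),
	       (unsigned)tqp_reg_base(1023), (unsigned)tqp_reg_base(1024));
	return 0;
}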
hdev->roce_base_vector; roce->rinfo.netdev = nic->kinfo.netdev; - roce->rinfo.roce_io_base = vport->back->hw.io_base; + roce->rinfo.roce_io_base = hdev->hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.mem_base; roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; @@ -2449,7 +2475,7 @@ static int hclge_init_msi(struct hclge_dev *hdev) hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + - hdev->roce_base_msix_offset; + hdev->num_nic_msi; hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(u16), GFP_KERNEL); @@ -4122,6 +4148,30 @@ struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) return container_of(handle, struct hclge_vport, nic); } +static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, + struct hnae3_vector_info *vector_info) +{ +#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64 + + vector_info->vector = pci_irq_vector(hdev->pdev, idx); + + /* need an extend offset to config vector >= 64 */ + if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2) + vector_info->io_addr = hdev->hw.io_base + + HCLGE_VECTOR_REG_BASE + + (idx - 1) * HCLGE_VECTOR_REG_OFFSET; + else + vector_info->io_addr = hdev->hw.io_base + + HCLGE_VECTOR_EXT_REG_BASE + + (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET_H + + (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * + HCLGE_VECTOR_REG_OFFSET; + + hdev->vector_status[idx] = hdev->vport[0].vport_id; + hdev->vector_irq[idx] = vector_info->vector; +} + static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, struct hnae3_vector_info *vector_info) { @@ -4129,23 +4179,16 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, struct hnae3_vector_info *vector = vector_info; struct hclge_dev *hdev = vport->back; int alloc = 0; - int i, j; + u16 i = 0; + u16 j; vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); vector_num = min(hdev->num_msi_left, vector_num); for (j = 0; j < vector_num; j++) { - for (i = 1; i < hdev->num_msi; i++) { + while (++i < hdev->num_nic_msi) { if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { - vector->vector = pci_irq_vector(hdev->pdev, i); - vector->io_addr = hdev->hw.io_base + - HCLGE_VECTOR_REG_BASE + - (i - 1) * HCLGE_VECTOR_REG_OFFSET + - vport->vport_id * - HCLGE_VECTOR_VF_OFFSET; - hdev->vector_status[i] = vport->vport_id; - hdev->vector_irq[i] = vector->vector; - + hclge_get_vector_info(hdev, i, vector); vector++; alloc++; @@ -4235,12 +4278,16 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, return 0; } -static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir) { struct hclge_rss_indirection_table_cmd *req; struct hclge_desc desc; - int i, j; + u8 rss_msb_oft; + u8 rss_msb_val; int ret; + u16 qid; + int i; + u32 j; req = (struct hclge_rss_indirection_table_cmd *)desc.data; @@ -4251,11 +4298,15 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir) req->start_table_index = cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); - - for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) - req->rss_result[j] = - indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; - + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) { + qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; + req->rss_qid_l[j] = qid & 0xff; + rss_msb_oft = + j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE; + rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) << + (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE); + 
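Note: hclge_get_vector_info applies the same split to interrupt vectors: the first 64 use the classic 4-byte stride at HCLGE_VECTOR_REG_BASE, and vectors beyond that move to HCLGE_VECTOR_EXT_REG_BASE, apparently grouped 64 per 0x1000 block. A sketch of the address computation, keeping the driver's 1-based idx (vector 0 is the misc interrupt):

#include <stdint.h>
#include <stdio.h>

#define VECTOR_REG_BASE     0x20000u
#define VECTOR_EXT_REG_BASE 0x30000u
#define VECTOR_REG_OFFSET   0x4u
#define VECTOR_REG_OFFSET_H 0x1000u
#define MAX_VEC_DEV_V2      64u

static uint32_t vector_reg(uint32_t idx)
{
	uint32_t v = idx - 1;

	if (v < MAX_VEC_DEV_V2)
		return VECTOR_REG_BASE + v * VECTOR_REG_OFFSET;

	return VECTOR_EXT_REG_BASE +
	       v / MAX_VEC_DEV_V2 * VECTOR_REG_OFFSET_H +
	       v % MAX_VEC_DEV_V2 * VECTOR_REG_OFFSET;
}

int main(void)
{
	printf("%#x %#x %#x\n", (unsigned)vector_reg(1),
	       (unsigned)vector_reg(64), (unsigned)vector_reg(65));
	return 0;
}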
req->rss_qid_h[rss_msb_oft] |= rss_msb_val; + } ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -4284,6 +4335,8 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M, HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B, + tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1); hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M, HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); @@ -4574,21 +4627,58 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return hdev->rss_size_max; + return hdev->pf_rss_size_max; } -int hclge_rss_init_hw(struct hclge_dev *hdev) +static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) { + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct hclge_vport *vport = hdev->vport; - u8 *rss_indir = vport[0].rss_indirection_tbl; - u16 rss_size = vport[0].alloc_rss_size; u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM] = {0}; u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + struct hnae3_tc_info *tc_info; + u16 roundup_size; + u16 rss_size; + int i; + + tc_info = &vport->nic.kinfo.tc_info; + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + rss_size = tc_info->tqp_count[i]; + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + /* tc_size set to hardware is the log2 of roundup power of two + * of rss_size, the acutal queue size is limited by indirection + * table. + */ + if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size || + rss_size == 0) { + dev_err(&hdev->pdev->dev, + "Configure rss tc size failed, invalid TC_SIZE = %u\n", + rss_size); + return -EINVAL; + } + + roundup_size = roundup_pow_of_two(rss_size); + roundup_size = ilog2(roundup_size); + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = tc_info->tqp_offset[i]; + } + + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} + +int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 *rss_indir = vport[0].rss_indirection_tbl; u8 *key = vport[0].rss_hash_key; u8 hfunc = vport[0].rss_algo; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 roundup_size; - unsigned int i; int ret; ret = hclge_set_rss_indir_table(hdev, rss_indir); @@ -4603,32 +4693,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) if (ret) return ret; - /* Each TC have the same queue size, and tc_size set to hardware is - * the log2 of roundup power of two of rss_size, the acutal queue - * size is limited by indirection table. - */ - if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { - dev_err(&hdev->pdev->dev, - "Configure rss tc size failed, invalid TC_SIZE = %u\n", - rss_size); - return -EINVAL; - } - - roundup_size = roundup_pow_of_two(rss_size); - roundup_size = ilog2(roundup_size); - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; - - if (!(hdev->hw_tc_map & BIT(i))) - continue; - - tc_valid[i] = 1; - tc_size[i] = roundup_size; - tc_offset[i] = rss_size * i; - } - - return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + return hclge_init_rss_tc_mode(hdev); } void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) @@ -4694,7 +4759,12 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, op = en ? 
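Note: with V3 devices the RSS queue id can exceed 8 bits, so the indirection-table command keeps the low byte per entry in rss_qid_l[] and bit-packs the extra high bit into rss_qid_h[], with HCLGE_RSS_CFG_TBL_BW_H bits reserved per entry. The packing arithmetic from the hunk above, runnable on its own:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TBL_SIZE      16
#define TBL_SIZE_H    4
#define BW_H          2   /* high-order bits reserved per entry */
#define BW_L          8   /* low-order bits per entry */
#define BITS_PER_BYTE 8

int main(void)
{
	uint8_t qid_l[TBL_SIZE], qid_h[TBL_SIZE_H];
	uint16_t indir[TBL_SIZE];
	int j;

	for (j = 0; j < TBL_SIZE; j++)
		indir[j] = (uint16_t)(j * 33);  /* some ids above 255 */

	memset(qid_h, 0, sizeof(qid_h));
	for (j = 0; j < TBL_SIZE; j++) {
		uint16_t qid = indir[j];

		qid_l[j] = qid & 0xff;
		/* same arithmetic as the patch: bit j*BW_H of the high
		 * array holds bit 8 of the queue id
		 */
		qid_h[j * BW_H / BITS_PER_BYTE] |=
			(uint8_t)(((qid >> BW_L) & 0x1) <<
				  (j * BW_H % BITS_PER_BYTE));
	}

	printf("qid_l[8]=%#x qid_h[2]=%#x\n", qid_l[8], qid_h[2]);
	return 0;
}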
HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR; hclge_cmd_setup_basic_desc(&desc, op, false); - req->int_vector_id = vector_id; + req->int_vector_id_l = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); i = 0; for (node = ring_chain; node; node = node->next) { @@ -4726,7 +4796,14 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport, hclge_cmd_setup_basic_desc(&desc, op, false); - req->int_vector_id = vector_id; + req->int_vector_id_l = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_L_M, + HCLGE_VECTOR_ID_L_S); + req->int_vector_id_h = + hnae3_get_field(vector_id, + HCLGE_VECTOR_ID_H_M, + HCLGE_VECTOR_ID_H_S); } } @@ -4787,61 +4864,56 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, return ret; } -static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param) +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, + bool en_uc, bool en_mc, bool en_bc) { + struct hclge_vport *vport = &hdev->vport[vf_id]; + struct hnae3_handle *handle = &vport->nic; struct hclge_promisc_cfg_cmd *req; struct hclge_desc desc; + bool uc_tx_en = en_uc; + u8 promisc_cfg = 0; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); req = (struct hclge_promisc_cfg_cmd *)desc.data; - req->vf_id = param->vf_id; + req->vf_id = vf_id; - /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on - * pdev revision(0x20), new revision support them. The - * value of this two fields will not return error when driver - * send command to fireware in revision(0x20). - */ - req->flag = (param->enable << HCLGE_PROMISC_EN_B) | - HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; + if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags)) + uc_tx_en = false; + + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0); + req->extend_promisc = promisc_cfg; + + /* to be compatible with DEVICE_VERSION_V1/2 */ + promisc_cfg = 0; + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 
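Note: hclge_bind_ring_with_vector splits the now 16-bit vector id across two bytes of the command: the low byte stays where int_vector_id used to live and the high byte reuses the former reserved field. Reduced to a sketch:

#include <stdint.h>
#include <stdio.h>

struct vector_chain_cmd {
	uint8_t int_vector_id_l;  /* bits 7..0 of the vector id */
	/* ... other fields elided ... */
	uint8_t int_vector_id_h;  /* bits 15..8, formerly a reserved byte */
};

static void set_vector_id(struct vector_chain_cmd *req, uint16_t vector_id)
{
	req->int_vector_id_l = vector_id & 0xff;
	req->int_vector_id_h = vector_id >> 8;
}

int main(void)
{
	struct vector_chain_cmd req;

	set_vector_id(&req, 300);
	printf("l=%#x h=%#x\n", req.int_vector_id_l, req.int_vector_id_h);
	return 0;
}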
1 : 0); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1); + hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1); + req->promisc = promisc_cfg; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(&hdev->pdev->dev, - "failed to set vport %d promisc mode, ret = %d.\n", - param->vf_id, ret); + "failed to set vport %u promisc mode, ret = %d.\n", + vf_id, ret); return ret; } -static void hclge_promisc_param_init(struct hclge_promisc_param *param, - bool en_uc, bool en_mc, bool en_bc, - int vport_id) -{ - if (!param) - return; - - memset(param, 0, sizeof(struct hclge_promisc_param)); - if (en_uc) - param->enable = HCLGE_PROMISC_EN_UC; - if (en_mc) - param->enable |= HCLGE_PROMISC_EN_MC; - if (en_bc) - param->enable |= HCLGE_PROMISC_EN_BC; - param->vf_id = vport_id; -} - int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc) { - struct hclge_dev *hdev = vport->back; - struct hclge_promisc_param param; - - hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, - vport->vport_id); - return hclge_cmd_set_promisc_mode(hdev, ¶m); + return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id, + en_uc_pmc, en_mc_pmc, en_bc_pmc); } static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, @@ -4976,7 +5048,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) } key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; - key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE, + key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE; key_cfg->inner_sipv6_word_en = LOW_2_WORDS; key_cfg->inner_dipv6_word_en = LOW_2_WORDS; key_cfg->outer_sipv6_word_en = 0; @@ -5053,6 +5125,7 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, struct hclge_fd_ad_data *action) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclge_fd_ad_config_cmd *req; struct hclge_desc desc; u64 ad_data = 0; @@ -5068,6 +5141,12 @@ static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, action->write_rule_id_to_bd); hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S, action->rule_id); + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { + hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B, + action->override_tc); + hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M, + HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size); + } ad_data <<= 32; hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet); hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B, @@ -5311,16 +5390,22 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, static int hclge_config_action(struct hclge_dev *hdev, u8 stage, struct hclge_fd_rule *rule) { + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_fd_ad_data ad_data; + memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data)); ad_data.ad_id = rule->location; if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { ad_data.drop_packet = true; - ad_data.forward_to_direct_queue = false; - ad_data.queue_id = 0; + } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) { + ad_data.override_tc = true; + ad_data.queue_id = + kinfo->tc_info.tqp_offset[rule->cls_flower.tc]; + ad_data.tc_size = + ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]); } else { - ad_data.drop_packet = false; ad_data.forward_to_direct_queue = true; ad_data.queue_id = rule->queue_id; } @@ -5837,6 +5922,14 @@ clear_rule: return ret; } +static bool 
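Note: the reworked promisc command programs two layouts at once: the new extend_promisc byte carries independent RX/TX enables for V3 hardware (and implements the limit_promisc private flag by withholding unicast TX promisc), while the old byte keeps the V1/V2 bit layout for compatibility. A sketch of building the V3 byte, with bit positions copied from the defines above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROMISC_UC_RX_EN 2
#define PROMISC_MC_RX_EN 3
#define PROMISC_BC_RX_EN 4
#define PROMISC_UC_TX_EN 5
#define PROMISC_MC_TX_EN 6
#define PROMISC_BC_TX_EN 7

static uint8_t build_extend_promisc(bool en_uc, bool en_mc, bool en_bc,
				    bool limit_promisc)
{
	/* limit_promisc keeps unicast TX filtering even in promisc mode */
	bool uc_tx = en_uc && !limit_promisc;
	uint8_t v = 0;

	v |= (uint8_t)(en_uc << PROMISC_UC_RX_EN);
	v |= (uint8_t)(en_mc << PROMISC_MC_RX_EN);
	v |= (uint8_t)(en_bc << PROMISC_BC_RX_EN);
	v |= (uint8_t)(uc_tx << PROMISC_UC_TX_EN);
	v |= (uint8_t)(en_mc << PROMISC_MC_TX_EN);
	v |= (uint8_t)(en_bc << PROMISC_BC_TX_EN);
	return v;
}

int main(void)
{
	printf("%#x\n", build_extend_promisc(true, true, true, false));
	printf("%#x\n", build_extend_promisc(true, true, true, true));
	return 0;
}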
hclge_is_cls_flower_active(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; +} + static int hclge_add_fd_entry(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -5861,6 +5954,12 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EOPNOTSUPP; } + if (hclge_is_cls_flower_active(handle)) { + dev_err(&hdev->pdev->dev, + "please delete all exist cls flower rules first\n"); + return -EINVAL; + } + fs = (struct ethtool_rx_flow_spec *)&cmd->fs; ret = hclge_fd_check_spec(hdev, fs, &unused); @@ -5891,7 +5990,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EINVAL; } - action = HCLGE_FD_ACTION_ACCEPT_PACKET; + action = HCLGE_FD_ACTION_SELECT_QUEUE; q_index = ring; } @@ -5942,7 +6041,8 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) return -EINVAL; - if (!hclge_fd_rule_exist(hdev, fs->location)) { + if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || + !hclge_fd_rule_exist(hdev, fs->location)) { dev_err(&hdev->pdev->dev, "Delete fail, rule %u is inexistent\n", fs->location); return -ENOENT; @@ -6042,7 +6142,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - if (!hnae3_dev_fd_supported(hdev)) + if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle)) return -EOPNOTSUPP; cmd->rule_cnt = hdev->hclge_fd_rule_num; @@ -6385,7 +6485,8 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, * arfs should not work */ spin_lock_bh(&hdev->fd_rule_lock); - if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { + if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && + hdev->fd_active_type != HCLGE_FD_RULE_NONE) { spin_unlock_bh(&hdev->fd_rule_lock); return -EOPNOTSUPP; } @@ -6413,7 +6514,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id, set_bit(bit_id, hdev->fd_bmap); rule->location = bit_id; - rule->flow_id = flow_id; + rule->arfs.flow_id = flow_id; rule->queue_id = queue_id; hclge_fd_build_arfs_rule(&new_tuples, rule); ret = hclge_fd_config_rule(hdev, rule); @@ -6457,7 +6558,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev) } hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { if (rps_may_expire_flow(handle->netdev, rule->queue_id, - rule->flow_id, rule->location)) { + rule->arfs.flow_id, rule->location)) { hlist_del_init(&rule->rule_node); hlist_add_head(&rule->rule_node, &del_list); hdev->hclge_fd_rule_num--; @@ -6486,6 +6587,286 @@ static void hclge_clear_arfs_rules(struct hnae3_handle *handle) #endif } +static void hclge_get_cls_key_basic(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + flow_rule_match_basic(flow, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + rule->tuples.ether_proto = ethtype_key; + rule->tuples_mask.ether_proto = ethtype_mask; + rule->tuples.ip_proto = match.key->ip_proto; + rule->tuples_mask.ip_proto = match.mask->ip_proto; + } else { + rule->unused_tuple |= BIT(INNER_IP_PROTO); + rule->unused_tuple |= BIT(INNER_ETH_TYPE); + 
} +} + +static void hclge_get_cls_key_mac(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(flow, &match); + ether_addr_copy(rule->tuples.dst_mac, match.key->dst); + ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst); + ether_addr_copy(rule->tuples.src_mac, match.key->src); + ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src); + } else { + rule->unused_tuple |= BIT(INNER_DST_MAC); + rule->unused_tuple |= BIT(INNER_SRC_MAC); + } +} + +static void hclge_get_cls_key_vlan(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(flow, &match); + rule->tuples.vlan_tag1 = match.key->vlan_id | + (match.key->vlan_priority << VLAN_PRIO_SHIFT); + rule->tuples_mask.vlan_tag1 = match.mask->vlan_id | + (match.mask->vlan_priority << VLAN_PRIO_SHIFT); + } else { + rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST); + } +} + +static void hclge_get_cls_key_ip(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + u16 addr_type = 0; + + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(flow, &match); + addr_type = match.key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(flow, &match); + rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src); + rule->tuples_mask.src_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->src); + rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst); + rule->tuples_mask.dst_ip[IPV4_INDEX] = + be32_to_cpu(match.mask->dst); + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(flow, &match); + be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.src_ip, + match.mask->src.s6_addr32, IPV6_SIZE); + be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32, + IPV6_SIZE); + be32_to_cpu_array(rule->tuples_mask.dst_ip, + match.mask->dst.s6_addr32, IPV6_SIZE); + } else { + rule->unused_tuple |= BIT(INNER_SRC_IP); + rule->unused_tuple |= BIT(INNER_DST_IP); + } +} + +static void hclge_get_cls_key_port(const struct flow_rule *flow, + struct hclge_fd_rule *rule) +{ + if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(flow, &match); + + rule->tuples.src_port = be16_to_cpu(match.key->src); + rule->tuples_mask.src_port = be16_to_cpu(match.mask->src); + rule->tuples.dst_port = be16_to_cpu(match.key->dst); + rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst); + } else { + rule->unused_tuple |= BIT(INNER_SRC_PORT); + rule->unused_tuple |= BIT(INNER_DST_PORT); + } +} + +static int hclge_parse_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, + struct hclge_fd_rule *rule) +{ + struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower); + struct flow_dissector *dissector = flow->match.dissector; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + dev_err(&hdev->pdev->dev, 
"unsupported key set: %#x\n", + dissector->used_keys); + return -EOPNOTSUPP; + } + + hclge_get_cls_key_basic(flow, rule); + hclge_get_cls_key_mac(flow, rule); + hclge_get_cls_key_vlan(flow, rule); + hclge_get_cls_key_ip(flow, rule); + hclge_get_cls_key_port(flow, rule); + + return 0; +} + +static int hclge_check_cls_flower(struct hclge_dev *hdev, + struct flow_cls_offload *cls_flower, int tc) +{ + u32 prio = cls_flower->common.prio; + + if (tc < 0 || tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, "invalid traffic class\n"); + return -EINVAL; + } + + if (prio == 0 || + prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { + dev_err(&hdev->pdev->dev, + "prio %u should be in range[1, %u]\n", + prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); + return -EINVAL; + } + + if (test_bit(prio - 1, hdev->fd_bmap)) { + dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); + return -EINVAL; + } + return 0; +} + +static int hclge_add_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower, + int tc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) { + dev_err(&hdev->pdev->dev, + "please remove all exist fd rules via ethtool first\n"); + return -EINVAL; + } + + ret = hclge_check_cls_flower(hdev, cls_flower, tc); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to check cls flower params, ret = %d\n", ret); + return ret; + } + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = hclge_parse_cls_flower(hdev, cls_flower, rule); + if (ret) + goto err; + + rule->action = HCLGE_FD_ACTION_SELECT_TC; + rule->cls_flower.tc = tc; + rule->location = cls_flower->common.prio - 1; + rule->vf_id = 0; + rule->cls_flower.cookie = cls_flower->cookie; + rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE; + + spin_lock_bh(&hdev->fd_rule_lock); + hclge_clear_arfs_rules(handle); + + ret = hclge_fd_config_rule(hdev, rule); + + spin_unlock_bh(&hdev->fd_rule_lock); + + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to add cls flower rule, ret = %d\n", ret); + goto err; + } + + return 0; +err: + kfree(rule); + return ret; +} + +static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, + unsigned long cookie) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + if (rule->cls_flower.cookie == cookie) + return rule; + } + + return NULL; +} + +static int hclge_del_cls_flower(struct hnae3_handle *handle, + struct flow_cls_offload *cls_flower) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_fd_rule *rule; + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + rule = hclge_find_cls_flower(hdev, cls_flower->cookie); + if (!rule) { + spin_unlock_bh(&hdev->fd_rule_lock); + return -EINVAL; + } + + ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, + NULL, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to delete cls flower rule %u, ret = %d\n", + rule->location, ret); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to delete cls flower rule %u in list, ret = %d\n", + rule->location, ret); + spin_unlock_bh(&hdev->fd_rule_lock); + return ret; + } + + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + static bool 
hclge_get_hw_reset_stat(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -6845,7 +7226,7 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id, int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); - req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->tqp_id = cpu_to_le16(tqp_id); req->stream_id = cpu_to_le16(stream_id); if (enable) req->enable |= 1U << HCLGE_TQP_ENABLE_B; @@ -8583,6 +8964,8 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) vcfg->insert_tag1_en ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, vcfg->insert_tag2_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B, + vcfg->tag_shift_mode_en ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; @@ -8620,6 +9003,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) vcfg->vlan1_vlan_prionly ? 1 : 0); hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, vcfg->vlan2_vlan_prionly ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B, + vcfg->strip_tag1_discard_en ? 1 : 0); + hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B, + vcfg->strip_tag2_discard_en ? 1 : 0); req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD / @@ -8647,7 +9034,10 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport, vport->txvlan_cfg.insert_tag1_en = false; vport->txvlan_cfg.default_tag1 = 0; } else { - vport->txvlan_cfg.accept_tag1 = false; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + + vport->txvlan_cfg.accept_tag1 = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3; vport->txvlan_cfg.insert_tag1_en = true; vport->txvlan_cfg.default_tag1 = vlan_tag; } @@ -8662,16 +9052,21 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport, vport->txvlan_cfg.accept_untag2 = true; vport->txvlan_cfg.insert_tag2_en = false; vport->txvlan_cfg.default_tag2 = 0; + vport->txvlan_cfg.tag_shift_mode_en = true; if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { vport->rxvlan_cfg.strip_tag1_en = false; vport->rxvlan_cfg.strip_tag2_en = vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_discard_en = false; } else { vport->rxvlan_cfg.strip_tag1_en = vport->rxvlan_cfg.rx_vlan_offload_en; vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; @@ -8966,10 +9361,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { vport->rxvlan_cfg.strip_tag1_en = false; vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.strip_tag2_discard_en = false; } else { vport->rxvlan_cfg.strip_tag1_en = enable; vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.strip_tag2_discard_en = true; } + + vport->rxvlan_cfg.strip_tag1_discard_en = false; vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; vport->rxvlan_cfg.rx_vlan_offload_en = enable; @@ -9081,6 +9480,7 @@ static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto) { + struct hnae3_ae_dev *ae_dev = 
pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_vlan_info vlan_info; @@ -9110,16 +9510,25 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, vlan_info.qos = qos; vlan_info.vlan_proto = ntohs(proto); - if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { - return hclge_update_port_base_vlan_cfg(vport, state, - &vlan_info); - } else { - ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], - vport->vport_id, state, - vlan, qos, - ntohs(proto)); + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to update port base vlan for vf %d, ret = %d\n", + vfid, ret); return ret; } + + /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based + * VLAN state. + */ + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 && + test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + vport->vport_id, state, + vlan, qos, + ntohs(proto)); + + return 0; } static void hclge_clear_vf_vlan(struct hclge_dev *hdev) @@ -9307,7 +9716,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); req = (struct hclge_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + req->tqp_id = cpu_to_le16(queue_id); if (enable) hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U); @@ -9330,7 +9739,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); req = (struct hclge_reset_tqp_queue_cmd *)desc.data; - req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + req->tqp_id = cpu_to_le16(queue_id); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -9870,6 +10279,28 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, } } +static int hclge_dev_mem_map(struct hclge_dev *hdev) +{ +#define HCLGE_MEM_BAR 4 + + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw = &hdev->hw; + + /* for device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR))) + return 0; + + hw->mem_base = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, HCLGE_MEM_BAR), + pci_resource_len(pdev, HCLGE_MEM_BAR)); + if (!hw->mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + static int hclge_pci_init(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -9908,9 +10339,16 @@ static int hclge_pci_init(struct hclge_dev *hdev) goto err_clr_master; } + ret = hclge_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); return 0; + +err_unmap_io_base: + pcim_iounmap(pdev, hdev->hw.io_base); err_clr_master: pci_clear_master(pdev); pci_release_regions(pdev); @@ -9924,6 +10362,9 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + if (hdev->hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.mem_base); + pcim_iounmap(pdev, hdev->hw.io_base); pci_free_irq_vectors(pdev); pci_clear_master(pdev); @@ -10600,12 +11041,10 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclge_get_max_channels(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev 
*hdev = vport->back; - return min_t(u32, hdev->rss_size_max, - vport->alloc_tqps / kinfo->num_tc); + return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); } static void hclge_get_channels(struct hnae3_handle *handle, @@ -10624,7 +11063,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, struct hclge_dev *hdev = vport->back; *alloc_tqps = vport->alloc_tqps; - *max_rss_size = hdev->rss_size_max; + *max_rss_size = hdev->pf_rss_size_max; } static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, @@ -10692,7 +11131,7 @@ out: dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } @@ -11425,6 +11864,9 @@ static const struct hnae3_ae_ops hclge_ops = { .set_vf_mac = hclge_set_vf_mac, .get_module_eeprom = hclge_get_module_eeprom, .get_cmdq_stat = hclge_get_cmdq_stat, + .add_cls_flower = hclge_add_cls_flower, + .del_cls_flower = hclge_del_cls_flower, + .cls_flower_active = hclge_is_cls_flower_active, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 64e6afdb61b8..50a294dfaff5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -27,9 +27,11 @@ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) #define HCLGE_VECTOR_REG_BASE 0x20000 +#define HCLGE_VECTOR_EXT_REG_BASE 0x30000 #define HCLGE_MISC_VECTOR_REG_BASE 0x20400 #define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_REG_OFFSET_H 0x1000 #define HCLGE_VECTOR_VF_OFFSET 0x100000 #define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 @@ -278,6 +280,7 @@ struct hclge_mac { struct hclge_hw { void __iomem *io_base; + void __iomem *mem_base; struct hclge_mac mac; int num_vec; struct hclge_cmq cmq; @@ -345,7 +348,8 @@ struct hclge_cfg { u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; - u16 rss_size_max; + u16 vf_rss_size_max; + u16 pf_rss_size_max; u8 phy_addr; u8 media_type; u8 mac_addr[ETH_ALEN]; @@ -561,6 +565,7 @@ enum HCLGE_FD_ACTIVE_RULE_TYPE { HCLGE_FD_RULE_NONE, HCLGE_FD_ARFS_ACTIVE, HCLGE_FD_EP_ACTIVE, + HCLGE_FD_TC_FLOWER_ACTIVE, }; enum HCLGE_FD_PACKET_TYPE { @@ -569,8 +574,9 @@ enum HCLGE_FD_PACKET_TYPE { }; enum HCLGE_FD_ACTION { - HCLGE_FD_ACTION_ACCEPT_PACKET, + HCLGE_FD_ACTION_SELECT_QUEUE, HCLGE_FD_ACTION_DROP_PACKET, + HCLGE_FD_ACTION_SELECT_TC, }; struct hclge_fd_key_cfg { @@ -615,12 +621,20 @@ struct hclge_fd_rule { struct hclge_fd_rule_tuples tuples_mask; u32 unused_tuple; u32 flow_type; - u8 action; - u16 vf_id; + union { + struct { + unsigned long cookie; + u8 tc; + } cls_flower; + struct { + u16 flow_id; /* only used for arfs */ + } arfs; + }; u16 queue_id; + u16 vf_id; u16 location; - u16 flow_id; /* only used for arfs */ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type; + u8 action; }; struct hclge_fd_ad_data { @@ -634,6 +648,8 @@ struct hclge_fd_ad_data { u8 write_rule_id_to_bd; u8 next_input_key; u16 rule_id; + u16 tc_size; + u8 override_tc; }; enum HCLGE_MAC_NODE_STATE { @@ -742,7 +758,8 @@ struct hclge_dev { u16 base_tqp_pid; /* Base task tqp physical id of this PF */ u16 alloc_rss_size; /* Allocated RSS task queue */ - u16 rss_size_max; /* HW defined max RSS task queue */ + u16 vf_rss_size_max; /* HW defined VF max RSS task queue */ + u16 pf_rss_size_max; /* HW defined PF max RSS task queue */ u16 fdir_pf_filter_count; 
/* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ @@ -767,7 +784,6 @@ struct hclge_dev { u16 num_msi; u16 num_msi_left; u16 num_msi_used; - u16 roce_base_msix_offset; u32 base_msi_vector; u16 *vector_status; int *vector_irq; @@ -848,15 +864,18 @@ struct hclge_tx_vtag_cfg { bool insert_tag2_en; /* Whether insert outer vlan tag */ u16 default_tag1; /* The default inner vlan tag to insert */ u16 default_tag2; /* The default outer vlan tag to insert */ + bool tag_shift_mode_en; }; /* VPort level vlan tag configuration for RX direction */ struct hclge_rx_vtag_cfg { - u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */ - u8 strip_tag1_en; /* Whether strip inner vlan tag */ - u8 strip_tag2_en; /* Whether strip outer vlan tag */ - u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ - u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ + bool rx_vlan_offload_en; /* Whether enable rx vlan offload */ + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly; /* Inner vlan tag up to descriptor enable */ + bool vlan2_vlan_prionly; /* Outer vlan tag up to descriptor enable */ + bool strip_tag1_discard_en; /* Inner vlan tag discard for BD enable */ + bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */ }; struct hclge_rss_tuple_cfg { @@ -901,7 +920,7 @@ struct hclge_vport { u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ /* User configured lookup table entries */ - u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; int rss_algo; /* User configured hash algorithm */ /* User configured rss tuple sets */ struct hclge_rss_tuple_cfg rss_tuple_sets; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 3ab6db2588d3..754c09ada901 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -227,6 +227,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, bool en_bc = req->msg.en_bc ? true : false; bool en_uc = req->msg.en_uc ? true : false; bool en_mc = req->msg.en_mc ? true : false; + struct hnae3_handle *handle = &vport->nic; int ret; if (!vport->vf_info.trusted) { @@ -234,6 +235,12 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, en_mc = false; } + if (req->msg.en_limit_promisc) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags); + else + clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags); + ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc); vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0; @@ -371,7 +378,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, status = hclge_update_port_base_vlan_cfg(vport, *state, vlan_info); } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { - resp_msg->data[0] = vport->port_base_vlan_cfg.state; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev); + /* vf does not need to know about the port based VLAN state + * on device HNAE3_DEVICE_VERSION_V3. So always return disable + * on device HNAE3_DEVICE_VERSION_V3 if vf queries the port + * based VLAN state. + */ + resp_msg->data[0] = + ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ? 
+ HNAE3_PORT_BASE_VLAN_DISABLE : + vport->port_base_vlan_cfg.state; resp_msg->len = sizeof(u8); } @@ -398,7 +414,7 @@ static void hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; unsigned int i; - for (i = 0; i < kinfo->num_tc; i++) + for (i = 0; i < kinfo->tc_info.num_tc; i++) resp_msg->data[0] |= BIT(i); resp_msg->len = sizeof(u8); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index e8495f58a1a8..82742a64f3b7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -302,12 +302,30 @@ static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, { struct hclge_nq_to_qs_link_cmd *map; struct hclge_desc desc; + u16 qs_id_l; + u16 qs_id_h; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); map = (struct hclge_nq_to_qs_link_cmd *)desc.data; map->nq_id = cpu_to_le16(q_id); + + /* convert qs_id to the following format to support qset_id >= 1024 + * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 | + * / / \ \ + * / / \ \ + * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 | + * | qs_id_h | vld | qs_id_l | + */ + qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK, + HCLGE_TM_QS_ID_L_S); + qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK, + HCLGE_TM_QS_ID_H_S); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S, + qs_id_l); + hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S, + qs_id_h); map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); return hclge_cmd_send(&hdev->hw, &desc, 1); @@ -377,7 +395,7 @@ static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s, static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pg_id, - u32 shapping_para) + u32 shapping_para, u32 rate) { struct hclge_pg_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; @@ -393,6 +411,10 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pg_rate = cpu_to_le32(rate); + return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,12 +442,16 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed); + return hclge_cmd_send(&hdev->hw, &desc, 1); } static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pri_id, - u32 shapping_para) + u32 shapping_para, u32 rate) { struct hclge_pri_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; @@ -442,6 +468,10 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + + shap_cfg_cmd->pri_rate = cpu_to_le32(rate); + return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -535,7 +565,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, false); @@ -543,6 +573,9 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i); 
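
The qset_id remapping documented in the hclge_tm_q_to_qs_map_cfg() hunk above is easier to see in isolation. A standalone sketch in plain C; the masks mirror the HCLGE_TM_QS_ID_* definitions added to hclge_tm.h, while encode_qset_id() itself is a hypothetical helper, not driver code:

#include <stdint.h>

#define QS_ID_L_MASK	0x03ffu	/* GENMASK(9, 0): low bits stay put */
#define QS_ID_H_MASK	0x7c00u	/* GENMASK(14, 10): bits that move up */
#define QS_ID_H_SHIFT	10
#define QS_ID_H_EXT_SHIFT 11
#define LINK_VLD	(1u << 10)	/* HCLGE_TM_Q_QS_LINK_VLD_MSK */

static uint16_t encode_qset_id(uint16_t qs_id)
{
	uint16_t lo = qs_id & QS_ID_L_MASK;
	uint16_t hi = (qs_id & QS_ID_H_MASK) >> QS_ID_H_SHIFT;

	/* bits [14:10] move to [15:11]; bit 10 becomes the valid flag */
	return lo | (uint16_t)(hi << QS_ID_H_EXT_SHIFT) | LINK_VLD;
}
/* encode_qset_id(0)    == 0x0400
 * encode_qset_id(1024) == 0x0c00
 */
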
shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para); + hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1); + shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -556,23 +589,66 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) return 0; } +static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + u16 max_rss_size = 0; + int i; + + if (!tc_info->mqprio_active) + return vport->alloc_tqps / tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc) + continue; + if (max_rss_size < tc_info->tqp_count[i]) + max_rss_size = tc_info->tqp_count[i]; + } + + return max_rss_size; +} + +static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; + struct hclge_dev *hdev = vport->back; + int sum = 0; + int i; + + if (!tc_info->mqprio_active) + return kinfo->rss_size * tc_info->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc) + sum += tc_info->tqp_count[i]; + } + + return sum; +} + static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; + u16 vport_max_rss_size; u16 max_rss_size; u8 i; /* TC configuration is shared by PF/VF in one port, only allow * one tc for VF for simplicity. VF's vport_id is non zero. */ - kinfo->num_tc = vport->vport_id ? 1 : + kinfo->tc_info.num_tc = vport->vport_id ? 1 : min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) + (vport->vport_id ? (vport->vport_id - 1) : 0); - max_rss_size = min_t(u16, hdev->rss_size_max, - vport->alloc_tqps / kinfo->num_tc); + vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max : + hdev->pf_rss_size_max; + max_rss_size = min_t(u16, vport_max_rss_size, + hclge_vport_get_max_rss_size(vport)); /* Set to user value, no larger than max_rss_size. */ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && @@ -589,34 +665,36 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) if (!kinfo->req_rss_size) max_rss_size = min_t(u16, max_rss_size, (hdev->num_nic_msi - 1) / - kinfo->num_tc); + kinfo->tc_info.num_tc); /* Set to the maximum specification value (max_rss_size). */ kinfo->rss_size = max_rss_size; } - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; + kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + /* when mqprio is enabled, the tc_info has already been updated.
*/ + if (kinfo->tc_info.mqprio_active) + return; + for (i = 0; i < HNAE3_MAX_TC; i++) { - if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; + if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { + set_bit(i, &kinfo->tc_info.tc_en); + kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; + kinfo->tc_info.tqp_count[i] = kinfo->rss_size; } else { /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; + clear_bit(i, &kinfo->tc_info.tc_en); + kinfo->tc_info.tqp_offset[i] = 0; + kinfo->tc_info.tqp_count[i] = 1; } } - memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, - sizeof_field(struct hnae3_knic_private_info, prio_tc)); + memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc, + sizeof_field(struct hnae3_tc_info, prio_tc)); } static void hclge_tm_vport_info_update(struct hclge_dev *hdev) @@ -682,7 +760,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) } } -static void hclge_pfc_info_init(struct hclge_dev *hdev) +static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev) { if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) { if (hdev->fc_mode_last_time == HCLGE_FC_PFC) @@ -700,6 +778,27 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev) } } +static void hclge_update_fc_mode(struct hclge_dev *hdev) +{ + if (!hdev->tm_info.pfc_en) { + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + return; + } + + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + +static void hclge_pfc_info_init(struct hclge_dev *hdev) +{ + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + hclge_update_fc_mode(hdev); + else + hclge_update_fc_mode_by_dcb_flag(hdev); +} + static void hclge_tm_schd_info_init(struct hclge_dev *hdev) { hclge_tm_pg_info_init(hdev); @@ -744,9 +843,10 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) /* Pg to pri */ for (i = 0; i < hdev->tm_info.num_pg; i++) { + u32 rate = hdev->tm_info.pg_info[i].bw_limit; + /* Calc shaper para */ - ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit, - HCLGE_SHAPER_LVL_PG, + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG, &ir_para, max_tm_rate); if (ret) return ret; @@ -756,7 +856,7 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para); + shaper_para, rate); if (ret) return ret; @@ -767,7 +867,7 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pg_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para); + shaper_para, rate); if (ret) return ret; } @@ -799,15 +899,14 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_tc_info *tc_info = &kinfo->tc_info; struct hnae3_queue **tqp = kinfo->tqp; - struct hnae3_tc_info *v_tc_info; u32 i, j; int ret; - for (i = 0; i < kinfo->num_tc; i++) { - v_tc_info = &kinfo->tc_info[i]; - for (j = 0; j < v_tc_info->tqp_count; j++) { - struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + for (i = 0; i < tc_info->num_tc; i++) { + for (j = 0; j < tc_info->tqp_count[i]; j++) { + struct hnae3_queue *q = 
tqp[tc_info->tqp_offset[i] + j]; ret = hclge_tm_q_to_qs_map_cfg(hdev, hclge_get_queue_id(q), @@ -832,7 +931,7 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_to_pri_map_cfg( hdev, vport[k].qs_offset + i, i); if (ret) @@ -873,8 +972,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) u32 i; for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, - HCLGE_SHAPER_LVL_PRI, + u32 rate = hdev->tm_info.tc_info[i].bw_limit; + + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, &ir_para, max_tm_rate); if (ret) return ret; @@ -883,7 +983,7 @@ HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para); + shaper_para, rate); if (ret) return ret; @@ -893,7 +993,7 @@ HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para); + shaper_para, rate); if (ret) return ret; } @@ -918,7 +1018,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, - vport->vport_id, shaper_para); + vport->vport_id, shaper_para, + vport->bw_limit); if (ret) return ret; @@ -927,7 +1028,8 @@ HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, - vport->vport_id, shaper_para); + vport->vport_id, shaper_para, + vport->bw_limit); if (ret) return ret; @@ -943,7 +1045,7 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) u32 i; int ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit, HCLGE_SHAPER_LVL_QSET, &ir_para, max_tm_rate); @@ -1065,7 +1167,7 @@ static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) return ret; /* Qset dwrr */ - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { ret = hclge_tm_qs_weight_cfg( hdev, vport->qs_offset + i, hdev->tm_info.pg_info[0].tc_dwrr[i]); @@ -1196,7 +1298,7 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) if (ret) return ret; - for (i = 0; i < kinfo->num_tc; i++) { + for (i = 0; i < kinfo->tc_info.num_tc; i++) { u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, @@ -1296,15 +1398,23 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) hdev->tm_info.pfc_en); } -/* Each Tc has a 1024 queue sets to backpress, it divides to - * 32 group, each group contains 32 queue sets, which can be - * represented by u32 bitmap. +/* the queues used for backpressure are divided into several groups; + * each group contains 32 queue sets, which can be represented by a u32 bitmap.
*/ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) { + u16 grp_id_shift = HCLGE_BP_GRP_ID_S; + u16 grp_id_mask = HCLGE_BP_GRP_ID_M; + u8 grp_num = HCLGE_BP_GRP_NUM; int i; - for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { + if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) { + grp_num = HCLGE_BP_EXT_GRP_NUM; + grp_id_mask = HCLGE_BP_EXT_GRP_ID_M; + grp_id_shift = HCLGE_BP_EXT_GRP_ID_S; + } + + for (i = 0; i < grp_num; i++) { u32 qs_bitmap = 0; int k, ret; @@ -1313,8 +1423,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) u16 qs_id = vport->qs_offset + tc; u8 grp, sub_grp; - grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M, - HCLGE_BP_GRP_ID_S); + grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift); sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, HCLGE_BP_SUB_GRP_ID_S); if (i == grp) @@ -1419,7 +1528,7 @@ void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) for (k = 0; k < hdev->num_alloc_vport; k++) { kinfo = &vport[k].nic.kinfo; - kinfo->prio_tc[i] = prio_tc[i]; + kinfo->tc_info.prio_tc[i] = prio_tc[i]; } } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index bb2a2d8e9259..5498d73ed34b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -39,6 +39,12 @@ struct hclge_nq_to_qs_link_cmd { __le16 nq_id; __le16 rsvd; #define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) +#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0) +#define HCLGE_TM_QS_ID_L_S 0 +#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10) +#define HCLGE_TM_QS_ID_H_S 10 +#define HCLGE_TM_QS_ID_H_EXT_S 11 +#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11) __le16 qset_id; }; @@ -86,22 +92,34 @@ enum hclge_shap_bucket { HCLGE_TM_SHAP_P_BUCKET, }; +/* set bit HCLGE_TM_RATE_VLD to 1 means use 'rate' to config shaping */ +#define HCLGE_TM_RATE_VLD 0 + struct hclge_pri_shapping_cmd { u8 pri_id; u8 rsvd[3]; __le32 pri_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pri_rate; }; struct hclge_pg_shapping_cmd { u8 pg_id; u8 rsvd[3]; __le32 pg_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 pg_rate; }; struct hclge_qs_shapping_cmd { __le16 qs_id; u8 rsvd[2]; __le32 qs_shapping_para; + u8 flag; + u8 rsvd1[3]; + __le32 qs_rate; }; #define HCLGE_BP_GRP_NUM 32 @@ -109,6 +127,11 @@ struct hclge_qs_shapping_cmd { #define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) #define HCLGE_BP_GRP_ID_S 5 #define HCLGE_BP_GRP_ID_M GENMASK(9, 5) + +#define HCLGE_BP_EXT_GRP_NUM 40 +#define HCLGE_BP_EXT_GRP_ID_S 5 +#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5) + struct hclge_bp_to_qs_map_cmd { u8 tc_id; u8 rsvd[2]; @@ -139,6 +162,9 @@ struct hclge_pfc_stats_cmd { struct hclge_port_shapping_cmd { __le32 port_shapping_para; + u8 flag; + u8 rsvd[3]; + __le32 port_rate; }; struct hclge_shaper_ir_para { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 66866c1cfb12..e04c0cfeb95c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -336,6 +336,10 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev, set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B)) set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B)) + set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); + if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B)) + 
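
The backpressure setup above addresses each qset as a (group, bit) coordinate in an array of u32 bitmaps, with the group field widening on large devices. An illustrative decomposition, assuming the masks from hclge_tm.h; bp_locate() is a hypothetical helper:

#include <stdint.h>

struct bp_pos {
	unsigned int grp;	/* which u32 bitmap */
	unsigned int sub_grp;	/* bit inside that bitmap */
};

/* 32 qsets per group; the group index sits in bits [9:5], or in
 * bits [10:5] on devices with more than 1024 TQPs (32 -> 40 groups)
 */
static struct bp_pos bp_locate(uint16_t qs_id, int extended)
{
	struct bp_pos pos;

	pos.sub_grp = qs_id & 0x1f;			/* GENMASK(4, 0) */
	pos.grp = extended ? (qs_id >> 5) & 0x3f	/* GENMASK(10, 5) */
			   : (qs_id >> 5) & 0x1f;	/* GENMASK(9, 5) */
	return pos;
}
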
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); } static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 9460c128c095..82eed258e8c1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -111,6 +111,9 @@ enum hclgevf_opcode_type { #define HCLGEVF_TQP_REG_OFFSET 0x80000 #define HCLGEVF_TQP_REG_SIZE 0x200 +#define HCLGEVF_TQP_MAX_SIZE_DEV_V2 1024 +#define HCLGEVF_TQP_EXT_REG_OFFSET 0x100 + struct hclgevf_tqp_map { __le16 tqp_id; /* Absolute tqp id for in this pf */ u8 tqp_vf; /* VF id */ @@ -149,12 +152,13 @@ enum HCLGEVF_CAP_BITS { HCLGEVF_CAP_FD_FORWARD_TC_B, HCLGEVF_CAP_PTP_B, HCLGEVF_CAP_INT_QL_B, - HCLGEVF_CAP_SIMPLE_BD_B, + HCLGEVF_CAP_HW_TX_CSUM_B, HCLGEVF_CAP_TX_PUSH_B, HCLGEVF_CAP_PHY_IMP_B, HCLGEVF_CAP_TQP_TXRX_INDEP_B, HCLGEVF_CAP_HW_PAD_B, HCLGEVF_CAP_STASH_B, + HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, }; #define HCLGEVF_QUERY_CAP_LENGTH 3 @@ -285,6 +289,14 @@ struct hclgevf_dev_specs_0_cmd { u8 rsv1[5]; }; +#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U + +struct hclgevf_dev_specs_1_cmd { + __le32 rsv0; + __le16 max_int_gl; + u8 rsv1[18]; +}; + static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) { writel(value, base + reg); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index c8e3fdd5999c..145757cb70f9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -14,6 +14,9 @@ #define HCLGEVF_RESET_MAX_FAIL_CNT 5 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); +static void hclgevf_task_schedule(struct hclgevf_dev *hdev, + unsigned long delay); + static struct hnae3_ae_algo ae_algovf; static struct workqueue_struct *hclgevf_wq; @@ -403,8 +406,20 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) tqp->q.buf_size = hdev->rx_buf_len; tqp->q.tx_desc_num = hdev->num_tx_desc; tqp->q.rx_desc_num = hdev->num_rx_desc; - tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + - i * HCLGEVF_TQP_REG_SIZE; + + /* need an extended offset to configure queues >= + * HCLGEVF_TQP_MAX_SIZE_DEV_V2. + */ + if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) + tqp->q.io_base = hdev->hw.io_base + + HCLGEVF_TQP_REG_OFFSET + + i * HCLGEVF_TQP_REG_SIZE; + else + tqp->q.io_base = hdev->hw.io_base + + HCLGEVF_TQP_REG_OFFSET + + HCLGEVF_TQP_EXT_REG_OFFSET + + (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * + HCLGEVF_TQP_REG_SIZE; tqp++; } @@ -418,19 +433,20 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) struct hnae3_knic_private_info *kinfo; u16 new_tqps = hdev->num_tqps; unsigned int i; + u8 num_tc = 0; kinfo = &nic->kinfo; - kinfo->num_tc = 0; kinfo->num_tx_desc = hdev->num_tx_desc; kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) - kinfo->num_tc++; + num_tc++; - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc); - new_tqps = kinfo->rss_size * kinfo->num_tc; + num_tc = num_tc ? 
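
The hclgevf_alloc_tqps() hunk above computes a second register window for queues at index 1024 and beyond. The address arithmetic, pulled out as a sketch -- the constants come from hclgevf_cmd.h, while tqp_io_base() is a hypothetical helper:

#define TQP_REG_OFFSET		0x80000
#define TQP_REG_SIZE		0x200
#define TQP_MAX_SIZE_DEV_V2	1024
#define TQP_EXT_REG_OFFSET	0x100

static void __iomem *tqp_io_base(void __iomem *io_base, unsigned int i)
{
	if (i < TQP_MAX_SIZE_DEV_V2)
		return io_base + TQP_REG_OFFSET + i * TQP_REG_SIZE;

	/* queues >= 1024 sit in an extended window shifted by 0x100 */
	return io_base + TQP_REG_OFFSET + TQP_EXT_REG_OFFSET +
	       (i - TQP_MAX_SIZE_DEV_V2) * TQP_REG_SIZE;
}
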
num_tc : 1; + kinfo->tc_info.num_tc = num_tc; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); + new_tqps = kinfo->rss_size * num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, @@ -448,7 +464,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) * and rss size with the actual vector numbers */ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); - kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc, + kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, kinfo->rss_size); return 0; @@ -1134,6 +1150,7 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc) { + struct hnae3_handle *handle = &hdev->nic; struct hclge_vf_to_pf_msg send_msg; int ret; @@ -1142,6 +1159,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, send_msg.en_bc = en_bc_pmc ? 1 : 0; send_msg.en_uc = en_uc_pmc ? 1 : 0; send_msg.en_mc = en_mc_pmc ? 1 : 0; + send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, + &handle->priv_flags) ? 1 : 0; ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); if (ret) @@ -1168,6 +1187,7 @@ static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); + hclgevf_task_schedule(hdev, 0); } static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) @@ -2430,6 +2450,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) roce->rinfo.netdev = nic->kinfo.netdev; roce->rinfo.roce_io_base = hdev->hw.io_base; + roce->rinfo.roce_mem_base = hdev->hw.mem_base; roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; @@ -2875,6 +2896,29 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client, } } +static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_MEM_BAR 4 + + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_hw *hw = &hdev->hw; + + /* if the device does not have device memory, return directly */ + if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) + return 0; + + hw->mem_base = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, + HCLGEVF_MEM_BAR), + pci_resource_len(pdev, HCLGEVF_MEM_BAR)); + if (!hw->mem_base) { + dev_err(&pdev->dev, "failed to map device memory\n"); + return -EFAULT; + } + + return 0; +} + static int hclgevf_pci_init(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; @@ -2909,8 +2953,14 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) goto err_clr_master; } + ret = hclgevf_dev_mem_map(hdev); + if (ret) + goto err_unmap_io_base; + return 0; +err_unmap_io_base: + pci_iounmap(pdev, hdev->hw.io_base); err_clr_master: pci_clear_master(pdev); pci_release_regions(pdev); @@ -2924,6 +2974,9 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + if (hdev->hw.mem_base) + devm_iounmap(&pdev->dev, hdev->hw.mem_base); + pci_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); pci_release_regions(pdev); @@ -2991,6 +3044,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) HCLGEVF_MAX_NON_TSO_BD_NUM; ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE; + ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; } static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, @@ -2998,13 +3052,17 @@ static void
hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); struct hclgevf_dev_specs_0_cmd *req0; + struct hclgevf_dev_specs_1_cmd *req1; req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; + req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; ae_dev->dev_specs.rss_ind_tbl_size = le16_to_cpu(req0->rss_ind_tbl_size); + ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); + ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); } static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) @@ -3017,6 +3075,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; if (!dev_specs->rss_key_size) dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE; + if (!dev_specs->max_int_gl) + dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; } static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) @@ -3301,7 +3361,7 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) struct hnae3_knic_private_info *kinfo = &nic->kinfo; return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + hdev->num_tqps / kinfo->tc_info.num_tc); } /** @@ -3344,7 +3404,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, kinfo->req_rss_size = new_tqps_num; max_rss_size = min_t(u16, hdev->rss_size_max, - hdev->num_tqps / kinfo->num_tc); + hdev->num_tqps / kinfo->tc_info.num_tc); /* Use the user's configuration when it is not larger than * max_rss_size, otherwise, use the maximum specification value. @@ -3356,7 +3416,7 @@ static void hclgevf_update_rss_size(struct hnae3_handle *handle, (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) kinfo->rss_size = max_rss_size; - kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; + kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; } static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, @@ -3402,7 +3462,7 @@ out: dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, - cur_tqps, kinfo->rss_size * kinfo->num_tc); + cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index c5bcc3894fd5..1b183bc35604 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -164,6 +164,7 @@ struct hclgevf_mac { struct hclgevf_hw { void __iomem *io_base; + void __iomem *mem_base; int num_vec; struct hclgevf_cmq cmq; struct hclgevf_mac mac; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 2630d667f393..58d5646444b0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -285,18 +285,8 @@ static int hinic_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack) { struct hinic_devlink_priv *priv = devlink_priv(devlink); - const struct firmware *fw; - int err; - - err = request_firmware_direct(&fw, params->file_name, - &priv->hwdev->hwif->pdev->dev); - if (err) - return err; - - err = hinic_firmware_update(priv, fw, extack); - release_firmware(fw); - return err; + return hinic_firmware_update(priv, params->fw, extack); } static const 
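
The hinic hunk above drops the driver's own request_firmware_direct() call because devlink core now loads the image and hands it to the callback as params->fw. A hedged sketch of a flash_update callback under that contract -- struct my_priv and my_flash() are placeholders for the device-specific pieces:

#include <linux/firmware.h>
#include <net/devlink.h>

struct my_priv {
	int dummy;	/* device-private state */
};

/* device-specific programming routine, placeholder */
static int my_flash(struct my_priv *priv, const struct firmware *fw,
		    struct netlink_ext_ack *extack);

static int my_flash_update(struct devlink *devlink,
			   struct devlink_flash_update_params *params,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *priv = devlink_priv(devlink);

	/* params->fw is requested and released by devlink core, so the
	 * driver no longer calls request_firmware()/release_firmware()
	 */
	return my_flash(priv, params->fw, extack);
}
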
struct devlink_ops hinic_devlink_ops = { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 350225bbe0be..9a9b09401d01 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -313,13 +313,7 @@ static void free_rxqs(struct hinic_dev *nic_dev) static int hinic_configure_max_qnum(struct hinic_dev *nic_dev) { - int err; - - err = hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); - if (err) - return err; - - return 0; + return hinic_set_max_qnum(nic_dev, nic_dev->hwdev->nic_cap.max_qps); } static int hinic_rss_init(struct hinic_dev *nic_dev) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 9c3cbe45c9ec..c9ae3d4dc547 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -8,6 +8,7 @@ #define HINIC_PORT_H #include <linux/types.h> +#include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/bitops.h> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index da9450f18717..a2191392ca4f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -84,8 +84,6 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *); static int ibmvnic_send_crq_init(struct ibmvnic_adapter *); static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); -static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, - union sub_crq *sub_crq); static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); static int enable_scrq_irq(struct ibmvnic_adapter *, @@ -306,9 +304,11 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, int count = pool->size - atomic_read(&pool->available); u64 handle = adapter->rx_scrq[pool->index]->handle; struct device *dev = &adapter->vdev->dev; + struct ibmvnic_ind_xmit_queue *ind_bufp; + struct ibmvnic_sub_crq_queue *rx_scrq; + union sub_crq *sub_crq; int buffers_added = 0; unsigned long lpar_rc; - union sub_crq sub_crq; struct sk_buff *skb; unsigned int offset; dma_addr_t dma_addr; @@ -320,8 +320,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, if (!pool->active) return; + rx_scrq = adapter->rx_scrq[pool->index]; + ind_bufp = &rx_scrq->ind_buf; for (i = 0; i < count; ++i) { - skb = alloc_skb(pool->buff_size, GFP_ATOMIC); + skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); if (!skb) { dev_err(dev, "Couldn't replenish rx buff\n"); adapter->replenish_no_mem++; @@ -346,12 +348,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, pool->rx_buff[index].pool_index = pool->index; pool->rx_buff[index].size = pool->buff_size; - memset(&sub_crq, 0, sizeof(sub_crq)); - sub_crq.rx_add.first = IBMVNIC_CRQ_CMD; - sub_crq.rx_add.correlator = + sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; + memset(sub_crq, 0, sizeof(*sub_crq)); + sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; + sub_crq->rx_add.correlator = cpu_to_be64((u64)&pool->rx_buff[index]); - sub_crq.rx_add.ioba = cpu_to_be32(dma_addr); - sub_crq.rx_add.map_id = pool->long_term_buff.map_id; + sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); + sub_crq->rx_add.map_id = pool->long_term_buff.map_id; /* The length field of the sCRQ is defined to be 24 bits so the * buffer size needs to be left shifted 
by a byte before it is @@ -361,15 +364,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, #ifdef __LITTLE_ENDIAN__ shift = 8; #endif - sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift); - - lpar_rc = send_subcrq(adapter, handle, &sub_crq); - if (lpar_rc != H_SUCCESS) - goto failure; - - buffers_added++; - adapter->replenish_add_buff_success++; + sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); pool->next_free = (pool->next_free + 1) % pool->size; + if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || + i == count - 1) { + lpar_rc = + send_subcrq_indirect(adapter, handle, + (u64)ind_bufp->indir_dma, + (u64)ind_bufp->index); + if (lpar_rc != H_SUCCESS) + goto failure; + buffers_added += ind_bufp->index; + adapter->replenish_add_buff_success += ind_bufp->index; + ind_bufp->index = 0; + } } atomic_add(buffers_added, &pool->available); return; @@ -377,13 +385,22 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, failure: if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED) dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n"); - pool->free_map[pool->next_free] = index; - pool->rx_buff[index].skb = NULL; + for (i = ind_bufp->index - 1; i >= 0; --i) { + struct ibmvnic_rx_buff *rx_buff; - dev_kfree_skb_any(skb); - adapter->replenish_add_buff_failure++; + pool->next_free = pool->next_free == 0 ? + pool->size - 1 : pool->next_free - 1; + sub_crq = &ind_bufp->indir_arr[i]; + rx_buff = (struct ibmvnic_rx_buff *) + be64_to_cpu(sub_crq->rx_add.correlator); + index = (int)(rx_buff - pool->rx_buff); + pool->free_map[pool->next_free] = index; + dev_kfree_skb_any(pool->rx_buff[index].skb); + pool->rx_buff[index].skb = NULL; + } + adapter->replenish_add_buff_failure += ind_bufp->index; atomic_add(buffers_added, &pool->available); - + ind_bufp->index = 0; if (lpar_rc == H_CLOSED || adapter->failover_pending) { /* Disable buffer pool replenishment and report carrier off if * queue is closed or pending failover. 
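
Two rules shape the reworked replenish path above: the sCRQ length field is 24 bits wide, so on little-endian hosts the 32-bit buffer size is shifted left one byte before the big-endian conversion, and staged rx descriptors are posted in one H_SEND_SUB_CRQ_INDIRECT hcall once the indirect array fills or the batch ends. A toy model in plain C (not driver code):

#include <stdbool.h>
#include <stdint.h>

#define MAX_IND_DESCS 128	/* IBMVNIC_MAX_IND_DESCS */

/* pre-shift the size so the 24-bit length lands correctly after the
 * cpu_to_be32() conversion on a little-endian host
 */
static uint32_t scrq_len_field(uint32_t buff_size, bool little_endian)
{
	return little_endian ? buff_size << 8 : buff_size;
}

/* flush the staged descriptors when the array is full or this is the
 * last buffer of the batch
 */
static bool flush_now(int staged, int i, int count)
{
	return staged == MAX_IND_DESCS || i == count - 1;
}
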
@@ -404,6 +421,8 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) if (adapter->rx_pool[i].active) replenish_rx_pool(adapter, &adapter->rx_pool[i]); } + + netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); } static void release_stats_buffers(struct ibmvnic_adapter *adapter) @@ -483,7 +502,7 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) if (rx_pool->buff_size != buff_size) { free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = buff_size; + rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff, rx_pool->size * @@ -577,7 +596,7 @@ static int init_rx_pools(struct net_device *netdev) rx_pool->size = adapter->req_rx_add_entries_per_subcrq; rx_pool->index = i; - rx_pool->buff_size = buff_size; + rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rx_pool->active = 1; rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), @@ -730,6 +749,7 @@ static int init_tx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int tx_subcrqs; + u64 buff_size; int i, rc; tx_subcrqs = adapter->num_active_tx_scrqs; @@ -746,9 +766,11 @@ static int init_tx_pools(struct net_device *netdev) adapter->num_active_tx_pools = tx_subcrqs; for (i = 0; i < tx_subcrqs; i++) { + buff_size = adapter->req_mtu + VLAN_HLEN; + buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], adapter->req_tx_entries_per_subcrq, - adapter->req_mtu + VLAN_HLEN); + buff_size); if (rc) { release_tx_pools(adapter); return rc; @@ -909,6 +931,7 @@ static int ibmvnic_login(struct net_device *netdev) __ibmvnic_set_mac(netdev, adapter->mac_addr); + netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state); return 0; } @@ -1146,6 +1169,7 @@ static int __ibmvnic_open(struct net_device *netdev) if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); enable_scrq_irq(adapter, adapter->tx_scrq[i]); + netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i)); } rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP); @@ -1339,6 +1363,10 @@ static int ibmvnic_close(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; + netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n", + adapter->state, adapter->failover_pending, + adapter->force_reset_recovery); + /* If device failover is pending, just set device state and return. * Device operation will be handled by reset routine. */ @@ -1476,17 +1504,18 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. 
*/ -static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, +static void build_hdr_descs_arr(struct sk_buff *skb, + union sub_crq *indir_arr, int *num_entries, u8 hdr_field) { int hdr_len[3] = {0, 0, 0}; + u8 hdr_data[140] = {0}; int tot_len; - u8 *hdr_data = txbuff->hdr_data; - tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, - txbuff->hdr_data); + tot_len = build_hdr_data(hdr_field, skb, hdr_len, + hdr_data); *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, - txbuff->indir_arr + 1); + indir_arr + 1); } static int ibmvnic_xmit_workarounds(struct sk_buff *skb, @@ -1504,17 +1533,95 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb, return 0; } +static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *tx_scrq) +{ + struct ibmvnic_ind_xmit_queue *ind_bufp; + struct ibmvnic_tx_buff *tx_buff; + struct ibmvnic_tx_pool *tx_pool; + union sub_crq tx_scrq_entry; + int queue_num; + int entries; + int index; + int i; + + ind_bufp = &tx_scrq->ind_buf; + entries = (u64)ind_bufp->index; + queue_num = tx_scrq->pool_index; + + for (i = entries - 1; i >= 0; --i) { + tx_scrq_entry = ind_bufp->indir_arr[i]; + if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC) + continue; + index = be32_to_cpu(tx_scrq_entry.v1.correlator); + if (index & IBMVNIC_TSO_POOL_MASK) { + tx_pool = &adapter->tso_pool[queue_num]; + index &= ~IBMVNIC_TSO_POOL_MASK; + } else { + tx_pool = &adapter->tx_pool[queue_num]; + } + tx_pool->free_map[tx_pool->consumer_index] = index; + tx_pool->consumer_index = tx_pool->consumer_index == 0 ? + tx_pool->num_buffers - 1 : + tx_pool->consumer_index - 1; + tx_buff = &tx_pool->tx_buff[index]; + adapter->netdev->stats.tx_packets--; + adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; + adapter->tx_stats_buffers[queue_num].packets--; + adapter->tx_stats_buffers[queue_num].bytes -= + tx_buff->skb->len; + dev_kfree_skb_any(tx_buff->skb); + tx_buff->skb = NULL; + adapter->netdev->stats.tx_dropped++; + } + ind_bufp->index = 0; + if (atomic_sub_return(entries, &tx_scrq->used) <= + (adapter->req_tx_entries_per_subcrq / 2) && + __netif_subqueue_stopped(adapter->netdev, queue_num)) { + netif_wake_subqueue(adapter->netdev, queue_num); + netdev_dbg(adapter->netdev, "Started queue %d\n", + queue_num); + } +} + +static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *tx_scrq) +{ + struct ibmvnic_ind_xmit_queue *ind_bufp; + u64 dma_addr; + u64 entries; + u64 handle; + int rc; + + ind_bufp = &tx_scrq->ind_buf; + dma_addr = (u64)ind_bufp->indir_dma; + entries = (u64)ind_bufp->index; + handle = tx_scrq->handle; + + if (!entries) + return 0; + rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + if (rc) + ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); + else + ind_bufp->index = 0; + return 0; +} + static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; struct device *dev = &adapter->vdev->dev; + struct ibmvnic_ind_xmit_queue *ind_bufp; struct ibmvnic_tx_buff *tx_buff = NULL; struct ibmvnic_sub_crq_queue *tx_scrq; struct ibmvnic_tx_pool *tx_pool; unsigned int tx_send_failed = 0; + netdev_tx_t ret = NETDEV_TX_OK; unsigned int tx_map_failed = 0; + union sub_crq indir_arr[16]; unsigned int tx_dropped = 0; unsigned int tx_packets = 0; unsigned int tx_bytes = 0; @@ -1527,8 +1634,10 @@ static netdev_tx_t 
ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned char *dst; int index = 0; u8 proto = 0; - u64 handle; - netdev_tx_t ret = NETDEV_TX_OK; + + tx_scrq = adapter->tx_scrq[queue_num]; + txq = netdev_get_tx_queue(netdev, queue_num); + ind_bufp = &tx_scrq->ind_buf; if (test_bit(0, &adapter->resetting)) { if (!netif_subqueue_stopped(netdev, skb)) @@ -1538,6 +1647,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -1545,6 +1655,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_dropped++; tx_send_failed++; ret = NETDEV_TX_OK; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } if (skb_is_gso(skb)) @@ -1552,10 +1663,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) else tx_pool = &adapter->tx_pool[queue_num]; - tx_scrq = adapter->tx_scrq[queue_num]; - txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb)); - handle = tx_scrq->handle; - index = tx_pool->free_map[tx_pool->consumer_index]; if (index == IBMVNIC_INVALID_MAP) { @@ -1563,6 +1670,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -1598,11 +1706,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff = &tx_pool->tx_buff[index]; tx_buff->skb = skb; - tx_buff->data_dma[0] = data_dma_addr; - tx_buff->data_len[0] = skb->len; tx_buff->index = index; tx_buff->pool_index = queue_num; - tx_buff->last_frag = true; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -1647,55 +1752,29 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); hdrs += 2; } - /* determine if l2/3/4 headers are sent to firmware */ - if ((*hdrs >> 7) & 1) { - build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); - tx_crq.v1.n_crq_elem = num_entries; - tx_buff->num_entries = num_entries; - tx_buff->indir_arr[0] = tx_crq; - tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, - sizeof(tx_buff->indir_arr), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, tx_buff->indir_dma)) { - dev_kfree_skb_any(skb); - tx_buff->skb = NULL; - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(dev, "tx: unable to map descriptor array\n"); - tx_map_failed++; - tx_dropped++; - ret = NETDEV_TX_OK; - goto tx_err_out; - } - lpar_rc = send_subcrq_indirect(adapter, handle, - (u64)tx_buff->indir_dma, - (u64)num_entries); - dma_unmap_single(dev, tx_buff->indir_dma, - sizeof(tx_buff->indir_arr), DMA_TO_DEVICE); - } else { - tx_buff->num_entries = num_entries; - lpar_rc = send_subcrq(adapter, handle, - &tx_crq); - } - if (lpar_rc != H_SUCCESS) { - if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) - dev_err_ratelimited(dev, "tx: send failed\n"); - dev_kfree_skb_any(skb); - tx_buff->skb = NULL; - if (lpar_rc == H_CLOSED || adapter->failover_pending) { - /* Disable TX and report carrier off if queue is closed - * or pending failover. - * Firmware guarantees that a signal will be sent to the - * driver, triggering a reset or some other action. 
- */ - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - } + if ((*hdrs >> 7) & 1) + build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs); - tx_send_failed++; - tx_dropped++; - ret = NETDEV_TX_OK; - goto tx_err_out; + tx_crq.v1.n_crq_elem = num_entries; + tx_buff->num_entries = num_entries; + /* flush buffer if current entry can not fit */ + if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + if (lpar_rc != H_SUCCESS) + goto tx_flush_err; + } + + indir_arr[0] = tx_crq; + memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], + num_entries * sizeof(struct ibmvnic_generic_scrq)); + ind_bufp->index += num_entries; + if (__netdev_tx_sent_queue(txq, skb->len, + netdev_xmit_more() && + ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + if (lpar_rc != H_SUCCESS) + goto tx_err; } if (atomic_add_return(num_entries, &tx_scrq->used) @@ -1710,14 +1789,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) ret = NETDEV_TX_OK; goto out; -tx_err_out: - /* roll back consumer index and map array*/ - if (tx_pool->consumer_index == 0) - tx_pool->consumer_index = - tx_pool->num_buffers - 1; - else - tx_pool->consumer_index--; - tx_pool->free_map[tx_pool->consumer_index] = index; +tx_flush_err: + dev_kfree_skb_any(skb); + tx_buff->skb = NULL; + tx_pool->consumer_index = tx_pool->consumer_index == 0 ? + tx_pool->num_buffers - 1 : + tx_pool->consumer_index - 1; + tx_dropped++; +tx_err: + if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER) + dev_err_ratelimited(dev, "tx: send failed\n"); + + if (lpar_rc == H_CLOSED || adapter->failover_pending) { + /* Disable TX and report carrier off if queue is closed + * or pending failover. + * Firmware guarantees that a signal will be sent to the + * driver, triggering a reset or some other action. 
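
The transmit path applies the same indirect-buffer batching. Its two flush decisions, condensed into self-contained predicates (a sketch; the real code above also rolls back pool state and stops the queue on hcall failure):

#include <stdbool.h>

#define MAX_IND_DESCS 128	/* IBMVNIC_MAX_IND_DESCS */

/* drain the indirect buffer first if the next descriptor chain would
 * not fit behind what is already staged
 */
static bool flush_before_staging(int staged, int new_entries)
{
	return staged + new_entries > MAX_IND_DESCS;
}

/* after staging: ring the doorbell unless the stack promises more
 * packets (xmit_more) and there is still room to keep batching
 */
static bool flush_after_staging(int staged, bool xmit_more)
{
	return !(xmit_more && staged < MAX_IND_DESCS);
}
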
+ */ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } out: netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; @@ -1931,8 +2022,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int i, rc; - netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", - rwi->reset_reason); + netdev_dbg(adapter->netdev, + "[S:%d FOP:%d] Reset reason %d, reset_state %d\n", + adapter->state, adapter->failover_pending, + rwi->reset_reason, reset_state); rtnl_lock(); /* @@ -2091,6 +2184,8 @@ out: adapter->state = reset_state; rtnl_unlock(); + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n", + adapter->state, adapter->failover_pending, rc); return rc; } @@ -2160,6 +2255,8 @@ out: /* restore adapter state if reset failed */ if (rc) adapter->state = reset_state; + netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n", + adapter->state, adapter->failover_pending, rc); return rc; } @@ -2270,6 +2367,11 @@ static void __ibmvnic_reset(struct work_struct *work) } clear_bit_unlock(0, &adapter->resetting); + + netdev_dbg(adapter->netdev, + "[S:%d FRR:%d WFR:%d] Done processing resets\n", + adapter->state, adapter->force_reset_recovery, + adapter->wait_for_reset); } static void __ibmvnic_delayed_reset(struct work_struct *work) @@ -2315,7 +2417,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_dbg(netdev, "Skipping matching reset\n"); + netdev_dbg(netdev, "Skipping matching reset, reason=%d\n", + reason); spin_unlock_irqrestore(&adapter->rwi_lock, flags); ret = EBUSY; goto err; @@ -2381,10 +2484,17 @@ static void remove_buff_from_pool(struct ibmvnic_adapter *adapter, static int ibmvnic_poll(struct napi_struct *napi, int budget) { - struct net_device *netdev = napi->dev; - struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int scrq_num = (int)(napi - adapter->napi); - int frames_processed = 0; + struct ibmvnic_sub_crq_queue *rx_scrq; + struct ibmvnic_adapter *adapter; + struct net_device *netdev; + int frames_processed; + int scrq_num; + + netdev = napi->dev; + adapter = netdev_priv(netdev); + scrq_num = (int)(napi - adapter->napi); + frames_processed = 0; + rx_scrq = adapter->rx_scrq[scrq_num]; restart_poll: while (frames_processed < budget) { @@ -2397,12 +2507,12 @@ restart_poll: if (unlikely(test_bit(0, &adapter->resetting) && adapter->reset_reason != VNIC_RESET_NON_FATAL)) { - enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); + enable_scrq_irq(adapter, rx_scrq); napi_complete_done(napi, frames_processed); return frames_processed; } - if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num])) + if (!pending_scrq(adapter, rx_scrq)) break; /* The queue entry at the current index is peeked at above * to determine that there is a valid descriptor awaiting @@ -2410,7 +2520,7 @@ restart_poll: * holds a valid descriptor before reading its contents. 
*/ dma_rmb(); - next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]); + next = ibmvnic_next_scrq(adapter, rx_scrq); rx_buff = (struct ibmvnic_rx_buff *)be64_to_cpu(next-> rx_comp.correlator); @@ -2467,16 +2577,21 @@ restart_poll: frames_processed++; } - if (adapter->state != VNIC_CLOSING) + if (adapter->state != VNIC_CLOSING && + ((atomic_read(&adapter->rx_pool[scrq_num].available) < + adapter->req_rx_add_entries_per_subcrq / 2) || + frames_processed < budget)) replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); - if (frames_processed < budget) { - enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - napi_complete_done(napi, frames_processed); - if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) && - napi_reschedule(napi)) { - disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); - goto restart_poll; + if (napi_complete_done(napi, frames_processed)) { + enable_scrq_irq(adapter, rx_scrq); + if (pending_scrq(adapter, rx_scrq)) { + rmb(); + if (napi_reschedule(napi)) { + disable_scrq_irq(adapter, rx_scrq); + goto restart_poll; + } + } } } return frames_processed; @@ -2880,10 +2995,12 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, irq_dispose_mapping(scrq->irq); scrq->irq = 0; } + if (scrq->msgs) { memset(scrq->msgs, 0, 4 * PAGE_SIZE); atomic_set(&scrq->used, 0); scrq->cur = 0; + scrq->ind_buf.index = 0; } else { netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); return -EINVAL; @@ -2942,6 +3059,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, } } + dma_free_coherent(dev, + IBMVNIC_IND_ARR_SZ, + scrq->ind_buf.indir_arr, + scrq->ind_buf.indir_dma); + dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, DMA_BIDIRECTIONAL); free_pages((unsigned long)scrq->msgs, 2); @@ -2988,6 +3110,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter scrq->adapter = adapter; scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); + scrq->ind_buf.index = 0; + + scrq->ind_buf.indir_arr = + dma_alloc_coherent(dev, + IBMVNIC_IND_ARR_SZ, + &scrq->ind_buf.indir_dma, + GFP_KERNEL); + + if (!scrq->ind_buf.indir_arr) + goto indir_failed; + spin_lock_init(&scrq->lock); netdev_dbg(adapter->netdev, @@ -2996,6 +3129,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter return scrq; +indir_failed: + do { + rc = plpar_hcall_norets(H_FREE_SUB_CRQ, + adapter->vdev->unit_address, + scrq->crq_num); + } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc)); reg_failed: dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, DMA_BIDIRECTIONAL); @@ -3110,14 +3249,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; struct ibmvnic_tx_pool *tx_pool; struct ibmvnic_tx_buff *txbuff; + struct netdev_queue *txq; union sub_crq *next; int index; - int i, j; + int i; restart_loop: while (pending_scrq(adapter, scrq)) { unsigned int pool = scrq->pool_index; int num_entries = 0; + int total_bytes = 0; + int num_packets = 0; /* The queue entry at the current index is peeked at above * to determine that there is a valid descriptor awaiting @@ -3140,21 +3282,16 @@ restart_loop: } txbuff = &tx_pool->tx_buff[index]; - - for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) { - if (!txbuff->data_dma[j]) - continue; - - txbuff->data_dma[j] = 0; - } - - if (txbuff->last_frag) { - dev_kfree_skb_any(txbuff->skb); + num_packets++; + num_entries += txbuff->num_entries; + if (txbuff->skb) { + total_bytes += txbuff->skb->len; + dev_consume_skb_irq(txbuff->skb); txbuff->skb = NULL; + } 
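
The rewritten ibmvnic_poll() above follows the canonical NAPI completion handshake: only the context that wins napi_complete_done() may re-enable the interrupt, and a descriptor that raced in meanwhile triggers an immediate reschedule instead of being stranded. A schematic with placeholder driver helpers -- struct my_queue and the my_* functions are assumptions, the napi_* calls are the real API:

#include <linux/netdevice.h>

struct my_queue {
	struct napi_struct napi;
	/* ring state ... */
};

static int my_clean_ring(struct my_queue *q, int budget);	/* placeholder */
static void my_irq_enable(struct my_queue *q);			/* placeholder */
static void my_irq_disable(struct my_queue *q);			/* placeholder */
static bool my_ring_pending(struct my_queue *q);		/* placeholder */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_queue *q = container_of(napi, struct my_queue, napi);
	int work_done = 0;

restart:
	work_done += my_clean_ring(q, budget - work_done);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		my_irq_enable(q);
		/* a descriptor may have landed between the last check
		 * and the IRQ enable; reschedule rather than strand it
		 */
		if (my_ring_pending(q) && napi_reschedule(napi)) {
			my_irq_disable(q);
			goto restart;
		}
	}
	return work_done;
}
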
else { + netdev_warn(adapter->netdev, + "TX completion received with NULL socket buffer\n"); } - - num_entries += txbuff->num_entries; - tx_pool->free_map[tx_pool->producer_index] = index; tx_pool->producer_index = (tx_pool->producer_index + 1) % @@ -3163,6 +3300,9 @@ restart_loop: /* remove tx_comp scrq*/ next->tx_comp.first = 0; + txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); + netdev_tx_completed_queue(txq, num_packets, total_bytes); + if (atomic_sub_return(num_entries, &scrq->used) <= (adapter->req_tx_entries_per_subcrq / 2) && __netif_subqueue_stopped(adapter->netdev, @@ -3567,38 +3707,6 @@ static void print_subcrq_error(struct device *dev, int rc, const char *func) } } -static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, - union sub_crq *sub_crq) -{ - unsigned int ua = adapter->vdev->unit_address; - struct device *dev = &adapter->vdev->dev; - u64 *u64_crq = (u64 *)sub_crq; - int rc; - - netdev_dbg(adapter->netdev, - "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n", - (unsigned long int)cpu_to_be64(remote_handle), - (unsigned long int)cpu_to_be64(u64_crq[0]), - (unsigned long int)cpu_to_be64(u64_crq[1]), - (unsigned long int)cpu_to_be64(u64_crq[2]), - (unsigned long int)cpu_to_be64(u64_crq[3])); - - /* Make sure the hypervisor sees the complete request */ - mb(); - - rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, - cpu_to_be64(remote_handle), - cpu_to_be64(u64_crq[0]), - cpu_to_be64(u64_crq[1]), - cpu_to_be64(u64_crq[2]), - cpu_to_be64(u64_crq[3])); - - if (rc) - print_subcrq_error(dev, rc, __func__); - - return rc; -} - static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, u64 remote_handle, u64 ioba, u64 num_entries) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 21e7ea858cda..c09c3f6bba9f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -31,6 +31,8 @@ #define IBMVNIC_BUFFS_PER_POOL 100 #define IBMVNIC_MAX_QUEUES 16 #define IBMVNIC_MAX_QUEUE_SZ 4096 +#define IBMVNIC_MAX_IND_DESCS 128 +#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32) #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 @@ -224,8 +226,6 @@ struct ibmvnic_tx_comp_desc { #define IBMVNIC_TCP_CHKSUM 0x20 #define IBMVNIC_UDP_CHKSUM 0x08 -#define IBMVNIC_MAX_FRAGS_PER_CRQ 3 - struct ibmvnic_tx_desc { u8 first; u8 type; @@ -861,6 +861,12 @@ union sub_crq { struct ibmvnic_rx_buff_add_desc rx_add; }; +struct ibmvnic_ind_xmit_queue { + union sub_crq *indir_arr; + dma_addr_t indir_dma; + int index; +}; + struct ibmvnic_sub_crq_queue { union sub_crq *msgs; int size, cur; @@ -873,10 +879,11 @@ struct ibmvnic_sub_crq_queue { spinlock_t lock; struct sk_buff *rx_skb_top; struct ibmvnic_adapter *adapter; + struct ibmvnic_ind_xmit_queue ind_buf; atomic_t used; char name[32]; u64 handle; -}; +} ____cacheline_aligned; struct ibmvnic_long_term_buff { unsigned char *buff; @@ -887,14 +894,8 @@ struct ibmvnic_long_term_buff { struct ibmvnic_tx_buff { struct sk_buff *skb; - dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ]; - unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ]; int index; int pool_index; - bool last_frag; - union sub_crq indir_arr[6]; - u8 hdr_data[140]; - dma_addr_t indir_dma; int num_entries; }; @@ -906,7 +907,7 @@ struct ibmvnic_tx_pool { struct ibmvnic_long_term_buff long_term_buff; int num_buffers; int buf_size; -}; +} ____cacheline_aligned; struct ibmvnic_rx_buff { struct sk_buff *skb; @@ -927,7 +928,7 @@ struct ibmvnic_rx_pool { int next_alloc; int active; struct 
ibmvnic_long_term_buff long_term_buff; -}; +} ____cacheline_aligned; struct ibmvnic_vpd { unsigned char *buff; @@ -1013,8 +1014,8 @@ struct ibmvnic_adapter { atomic_t running_cap_crqs; bool wait_capability; - struct ibmvnic_sub_crq_queue **tx_scrq; - struct ibmvnic_sub_crq_queue **rx_scrq; + struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned; + struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned; /* rx structs */ struct napi_struct *napi; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 908fefaa6b85..66776ba7bfb6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2019 Intel Corporation. */ +#include <linux/ethtool.h> #include <linux/vmalloc.h> #include "fm10k.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3f5825fa67c9..4aca637d4a23 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -676,6 +676,8 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) i40e_clean_tx_ring(tx_ring); kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; + kfree(tx_ring->xsk_descs); + tx_ring->xsk_descs = NULL; if (tx_ring->desc) { dma_free_coherent(tx_ring->dev, tx_ring->size, @@ -1277,6 +1279,13 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!tx_ring->tx_bi) goto err; + if (ring_is_xdp(tx_ring)) { + tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs), + GFP_KERNEL); + if (!tx_ring->xsk_descs) + goto err; + } + u64_stats_init(&tx_ring->syncp); /* round up to nearest 4K */ @@ -1300,6 +1309,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) return 0; err: + kfree(tx_ring->xsk_descs); + tx_ring->xsk_descs = NULL; kfree(tx_ring->tx_bi); tx_ring->tx_bi = NULL; return -ENOMEM; @@ -1436,7 +1447,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) /* XDP RX-queue info only needed for RX rings exposed to XDP */ if (rx_ring->vsi->type == I40E_VSI_MAIN) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index); + rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); if (err < 0) return err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 2feed920ef8a..5f531b195959 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -389,6 +389,7 @@ struct i40e_ring { struct i40e_channel *ch; struct xdp_rxq_info xdp_rxq; struct xsk_buff_pool *xsk_pool; + struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */ } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1b5390ec3d78..729c4f0d5ac5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -63,7 +63,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) } else if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = - (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0); + (vf->link_up ? 
i40e_virtchnl_link_speed(ls->link_speed) : 0); } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; @@ -4441,6 +4441,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; + struct i40e_link_status *ls = &pf->hw.phy.link_info; struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; @@ -4478,7 +4479,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) vf->link_forced = true; vf->link_up = true; pfe.event_data.link_event.link_status = true; - pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; + pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); break; case IFLA_VF_LINK_STATE_DISABLE: vf->link_forced = true; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 567fd67e900e..bfa84bfb0488 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -2,6 +2,7 @@ /* Copyright(c) 2018 Intel Corporation. */ #include <linux/bpf_trace.h> +#include <linux/stringify.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> @@ -311,7 +312,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) continue; } - bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) @@ -381,58 +381,102 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_packets; } -/** - * i40e_xmit_zc - Performs zero-copy Tx AF_XDP - * @xdp_ring: XDP Tx ring - * @budget: NAPI budget - * - * Returns true if the work is finished. 
- **/ -static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) +static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc, + unsigned int *total_bytes) { - unsigned int sent_frames = 0, total_bytes = 0; - struct i40e_tx_desc *tx_desc = NULL; - struct i40e_tx_buffer *tx_bi; - struct xdp_desc desc; + struct i40e_tx_desc *tx_desc; dma_addr_t dma; - while (budget-- > 0) { - if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc)) - break; + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); - dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr); - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, - desc.len); + tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++); + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP, + 0, desc->len, 0); - tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; - tx_bi->bytecount = desc.len; + *total_bytes += desc->len; +} - tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use); - tx_desc->buffer_addr = cpu_to_le64(dma); - tx_desc->cmd_type_offset_bsz = - build_ctob(I40E_TX_DESC_CMD_ICRC - | I40E_TX_DESC_CMD_EOP, - 0, desc.len, 0); +static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc, + unsigned int *total_bytes) +{ + u16 ntu = xdp_ring->next_to_use; + struct i40e_tx_desc *tx_desc; + dma_addr_t dma; + u32 i; - sent_frames++; - total_bytes += tx_bi->bytecount; + loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { + dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); + xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len); - xdp_ring->next_to_use++; - if (xdp_ring->next_to_use == xdp_ring->count) - xdp_ring->next_to_use = 0; + tx_desc = I40E_TX_DESC(xdp_ring, ntu++); + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | + I40E_TX_DESC_CMD_EOP, + 0, desc[i].len, 0); + + *total_bytes += desc[i].len; } - if (tx_desc) { - /* Request an interrupt for the last frame and bump tail ptr. */ - tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << - I40E_TXD_QW1_CMD_SHIFT); - i40e_xdp_ring_update_tail(xdp_ring); + xdp_ring->next_to_use = ntu; +} - xsk_tx_release(xdp_ring->xsk_pool); - i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes); +static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts, + unsigned int *total_bytes) +{ + u32 batched, leftover, i; + + batched = nb_pkts & ~(PKTS_PER_BATCH - 1); + leftover = nb_pkts & (PKTS_PER_BATCH - 1); + for (i = 0; i < batched; i += PKTS_PER_BATCH) + i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes); + for (i = batched; i < batched + leftover; i++) + i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes); +} + +static void i40e_set_rs_bit(struct i40e_ring *xdp_ring) +{ + u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1; + struct i40e_tx_desc *tx_desc; + + tx_desc = I40E_TX_DESC(xdp_ring, ntu); + tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT); +} + +/** + * i40e_xmit_zc - Performs zero-copy Tx AF_XDP + * @xdp_ring: XDP Tx ring + * @budget: NAPI budget + * + * Returns true if the work is finished. 
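
The rewritten i40e_xmit_zc() below pulls up to a whole budget of descriptors from the XSK pool in one xsk_tx_peek_release_desc_batch() call, fills the hardware ring in at most two runs (splitting once when the batch crosses the end of the ring), and sets a single RS bit on the last descriptor instead of one per frame. A sketch of the wrap-split arithmetic, with fill_range() standing in for the per-run work that i40e_fill_tx_hw_ring() does:

/* Fill a circular ring from a linear batch, splitting once at the wrap. */
static void fill_wrapped(u16 *next_to_use, u16 ring_size,
                         const struct xdp_desc *descs, u32 nb_pkts)
{
        u32 head = 0;

        if (*next_to_use + nb_pkts >= ring_size) {
                head = ring_size - *next_to_use; /* slots left before the wrap */
                fill_range(*next_to_use, descs, head);
                *next_to_use = 0;
        }
        fill_range(*next_to_use, &descs[head], nb_pkts - head);
        *next_to_use += nb_pkts - head;
}
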
+ **/ +static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) +{ + struct xdp_desc *descs = xdp_ring->xsk_descs; + u32 nb_pkts, nb_processed = 0; + unsigned int total_bytes = 0; + + nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget); + if (!nb_pkts) + return false; + + if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { + nb_processed = xdp_ring->count - xdp_ring->next_to_use; + i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes); + xdp_ring->next_to_use = 0; } - return !!budget; + i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, + &total_bytes); + + /* Request an interrupt for the last frame and bump tail ptr. */ + i40e_set_rs_bit(xdp_ring); + i40e_xdp_ring_update_tail(xdp_ring); + + i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes); + + return true; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 7adfd8539247..ea88f4597a07 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -4,6 +4,22 @@ #ifndef _I40E_XSK_H_ #define _I40E_XSK_H_ +/* This value should match the pragma in the loop_unrolled_for + * macro. Why 4? It is strictly empirical. It seems to be a good + * compromise between the advantage of having simultaneous outstanding + * reads to the DMA array that can hide each others latency and the + * disadvantage of having a larger code path. + */ +#define PKTS_PER_BATCH 4 + +#ifdef __clang__ +#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for +#elif __GNUC__ >= 8 +#define loop_unrolled_for _Pragma("GCC unroll 4") for +#else +#define loop_unrolled_for for +#endif + struct i40e_vsi; struct xsk_buff_pool; struct zero_copy_allocator; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a0723831c4e4..56725356a17b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -304,7 +304,6 @@ struct ice_vsi { u8 irqs_ready:1; u8 current_isup:1; /* Sync 'link up' logging */ u8 stat_offsets_loaded:1; - u8 vlan_ena:1; u16 num_vlan; /* queue information */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index fe4320e2d1f2..3124a3bf519a 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -306,7 +306,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) /* coverity[check_return] */ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, - ring->q_index); + ring->q_index, ring->q_vector->napi.napi_id); ring->xsk_pool = ice_xsk_pool(ring); if (ring->xsk_pool) { @@ -333,7 +333,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring) /* coverity[check_return] */ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, - ring->q_index); + ring->q_index, ring->q_vector->napi.napi_id); err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 7db5fd977367..6d7e7dd0ebe2 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -904,8 +904,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "Failed to get scheduler allocated resources\n"); + ice_debug(hw, ICE_DBG_SCHED, "Failed to get 
scheduler allocated resources\n"); goto err_unroll_alloc; } @@ -925,7 +924,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) - goto err_unroll_sched; + dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", + status); /* Initialize port_info struct with link information */ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); @@ -1044,8 +1044,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw) } if (cnt == grst_timeout) { - ice_debug(hw, ICE_DBG_INIT, - "Global reset polling failed to complete.\n"); + ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } @@ -1063,16 +1062,14 @@ enum ice_status ice_check_reset(struct ice_hw *hw) for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, GLNVM_ULD) & uld_mask; if (reg == uld_mask) { - ice_debug(hw, ICE_DBG_INIT, - "Global reset processes done. %d\n", cnt); + ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; } mdelay(10); } if (cnt == ICE_PF_RESET_WAIT_COUNT) { - ice_debug(hw, ICE_DBG_INIT, - "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", reg); return ICE_ERR_RESET_FAILED; } @@ -1124,8 +1121,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw) } if (cnt == ICE_PF_RESET_WAIT_COUNT) { - ice_debug(hw, ICE_DBG_INIT, - "PF reset polling failed to complete.\n"); + ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } @@ -1578,8 +1574,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, goto ice_acquire_res_exit; if (status) - ice_debug(hw, ICE_DBG_RES, - "resource %d acquire type %d failed.\n", res, access); + ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); /* If necessary, poll until the current lock owner timeouts */ timeout = time_left; @@ -1602,11 +1597,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, ice_acquire_res_exit: if (status == ICE_ERR_AQ_NO_WORK) { if (access == ICE_RES_WRITE) - ice_debug(hw, ICE_DBG_RES, - "resource indicates no work to do.\n"); + ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else - ice_debug(hw, ICE_DBG_RES, - "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); + ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); } return status; } @@ -1792,66 +1785,53 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, switch (cap) { case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: valid_functions (bitmap) = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); - ice_debug(hw, ICE_DBG_INIT, - "%s: sr_iov_1_1 = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_DCB: caps->dcb = (number == 1); caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: dcb = %d\n", prefix, caps->dcb); - ice_debug(hw, ICE_DBG_INIT, - "%s: active_tc_bitmap = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); - ice_debug(hw, ICE_DBG_INIT, - "%s: maxtc 
= %d\n", prefix, caps->maxtc); + ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_size = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_entry_width = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_rxq = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, caps->num_rxq); - ice_debug(hw, ICE_DBG_INIT, - "%s: rxq_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_txq = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, caps->num_txq); - ice_debug(hw, ICE_DBG_INIT, - "%s: txq_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_msix_vectors = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); - ice_debug(hw, ICE_DBG_INIT, - "%s: msix_vector_first_id = %d\n", prefix, + ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_PENDING_NVM_VER: @@ -1904,8 +1884,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) if (hw->dev_caps.num_funcs > 4) { /* Max 4 TCs per port */ caps->maxtc = 4; - ice_debug(hw, ICE_DBG_INIT, - "reducing maxtc to %d (based on #ports)\n", + ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); } } @@ -1973,11 +1952,9 @@ ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) GLQF_FD_SIZE_FD_BSIZE_S; func_p->fd_fltr_best_effort = val; - ice_debug(hw, ICE_DBG_INIT, - "func caps: fd_fltr_guar = %d\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", func_p->fd_fltr_guar); - ice_debug(hw, ICE_DBG_INIT, - "func caps: fd_fltr_best_effort = %d\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", func_p->fd_fltr_best_effort); } @@ -2026,8 +2003,7 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, default: /* Don't list common capabilities as unknown */ if (!found) - ice_debug(hw, ICE_DBG_INIT, - "func caps: unknown capability[%d]: 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", i, cap); break; } @@ -2160,8 +2136,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, default: /* Don't list common capabilities as unknown */ if (!found) - ice_debug(hw, ICE_DBG_INIT, - "dev caps: unknown capability[%d]: 0x%x\n", + ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", i, cap); break; } @@ -2618,8 +2593,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, /* Ensure that only valid bits of cfg->caps can be turned on. 
*/ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { - ice_debug(hw, ICE_DBG_PHY, - "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", + ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", cfg->caps); cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; @@ -3067,8 +3041,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) status = ice_update_link_info(pi); if (status) - ice_debug(pi->hw, ICE_DBG_LINK, - "get link status error, status = %d\n", + ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", status); } @@ -3793,8 +3766,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, * of the endianness of the machine. */ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, - "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", + ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", f, ce_info[f].width, ce_info[f].size_of); continue; } @@ -4261,10 +4233,6 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, */ bool ice_fw_supports_link_override(struct ice_hw *hw) { - /* Currently, only supported for E810 devices */ - if (hw->mac_type != ICE_MAC_E810) - return false; - if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) return true; @@ -4296,8 +4264,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read link override TLV.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); return status; } @@ -4308,8 +4275,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, /* link options first */ status = ice_read_sr_word(hw, tlv_start, &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; @@ -4320,8 +4286,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; status = ice_read_sr_word(hw, offset, &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override phy config.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); return status; } ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; @@ -4331,8 +4296,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ @@ -4345,8 +4309,7 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read override link options.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c 
b/drivers/net/ethernet/intel/ice/ice_controlq.c index 1f46a7828be8..4db12d1f5808 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -717,8 +717,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) if (status != ICE_ERR_AQ_FW_CRITICAL) break; - ice_debug(hw, ICE_DBG_AQ_MSG, - "Retry Admin Queue init due to FW critical error\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -813,8 +812,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); + ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); memset(desc, 0, sizeof(*desc)); memset(details, 0, sizeof(*details)); ntc++; @@ -852,8 +850,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) len = le16_to_cpu(cq_desc->datalen); - ice_debug(hw, ICE_DBG_AQ_DESC, - "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", + ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(cq_desc->opcode), le16_to_cpu(cq_desc->flags), le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); @@ -925,8 +922,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send queue not initialized.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } @@ -938,8 +934,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, if (buf) { if (buf_size > cq->sq_buf_size) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Invalid buffer size for Control Send queue: %d.\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); status = ICE_ERR_INVAL_SIZE; goto sq_send_command_error; @@ -952,8 +947,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, val = rd32(hw, cq->sq.head); if (val >= cq->num_sq_entries) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "head overrun at %d in the Control Send Queue ring\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; @@ -971,8 +965,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, * called in a separate thread in case of asynchronous completions. 
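
ice_clean_sq(), referenced just below, reclaims send-queue slots by walking next_to_clean toward the head index the hardware reports, and the caller treats "no unused descriptors" as a full ring. The free-slot arithmetic is the kernel's standard DESC_UNUSED idiom for a ring with producer ntu and consumer ntc, which keeps one slot empty so the full and empty states stay distinguishable:

/* Unused entries in a ring of 'count' descriptors. */
static u16 ring_unused(u16 ntc, u16 ntu, u16 count)
{
        return (u16)((ntc > ntu ? 0 : count) + ntc - ntu - 1);
}
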
*/ if (ice_clean_sq(hw, cq) == 0) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Error: Control Send Queue is full.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); status = ICE_ERR_AQ_FULL; goto sq_send_command_error; } @@ -1000,8 +993,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } /* Debug desc and buffer */ - ice_debug(hw, ICE_DBG_AQ_DESC, - "ATQ: Control Send queue desc and buffer:\n"); + ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); @@ -1026,8 +1018,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 copy_size = le16_to_cpu(desc->datalen); if (copy_size > buf_size) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Return len %d > than buf len %d\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); status = ICE_ERR_AQ_ERROR; } else { @@ -1036,8 +1027,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, } retval = le16_to_cpu(desc->retval); if (retval) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send Queue command 0x%04X completed with error 0x%X\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n", le16_to_cpu(desc->opcode), retval); @@ -1050,8 +1040,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, cq->sq_last_status = (enum ice_aq_err)retval; } - ice_debug(hw, ICE_DBG_AQ_MSG, - "ATQ: desc and buffer writeback:\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); ice_debug_cq(hw, (void *)desc, buf, buf_size); @@ -1067,8 +1056,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); status = ICE_ERR_AQ_FW_CRITICAL; } else { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Send Queue Writeback timeout.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); status = ICE_ERR_AQ_TIMEOUT; } } @@ -1124,8 +1112,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, mutex_lock(&cq->rq_lock); if (!cq->rq.count) { - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Receive queue not initialized.\n"); + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); ret_code = ICE_ERR_AQ_EMPTY; goto clean_rq_elem_err; } @@ -1147,8 +1134,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, flags = le16_to_cpu(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; - ice_debug(hw, ICE_DBG_AQ_MSG, - "Control Receive Queue Event 0x%04X received with error 0x%X\n", + ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", le16_to_cpu(desc->opcode), cq->rq_last_status); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 511da59bd6f2..29d6192b15f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -247,9 +247,7 @@ ice_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); - struct device *dev = &pf->pdev->dev; struct ice_hw *hw = &pf->hw; - const struct firmware *fw; u8 preservation; int err; @@ -277,22 +275,9 @@ ice_devlink_flash_update(struct devlink *devlink, if (err) return err; - err = request_firmware(&fw, params->file_name, dev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk"); - return err; - } - - dev_dbg(dev, "Beginning flash update with 
file '%s'\n", params->file_name); - - devlink_flash_update_begin_notify(devlink); devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0); - err = ice_flash_pldm_image(pf, fw, preservation, extack); - devlink_flash_update_end_notify(devlink); - - release_firmware(fw); - return err; + return ice_flash_pldm_image(pf, params->fw, preservation, extack); } static const struct devlink_ops ice_devlink_ops = { diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 9095b4d274ad..f5e81b555353 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -709,8 +709,7 @@ ice_acquire_global_cfg_lock(struct ice_hw *hw, if (!status) mutex_lock(&ice_global_cfg_lock_sw); else if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, - "Global config lock: No work to do\n"); + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; } @@ -909,8 +908,7 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) last, &offset, &info, NULL); if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Update pkg failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", status, offset, info); break; } @@ -988,8 +986,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) /* Save AQ status from download package */ hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { - ice_debug(hw, ICE_DBG_PKG, - "Pkg download failed: err %d off %d inf %d\n", + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); break; @@ -1083,8 +1080,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft, meta_seg->pkg_name); } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find metadata segment in driver package\n"); + ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n"); return ICE_ERR_CFG; } @@ -1101,8 +1097,7 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); } else { - ice_debug(hw, ICE_DBG_INIT, - "Did not find ice segment in driver package\n"); + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); return ICE_ERR_CFG; } @@ -1318,8 +1313,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, - "OS package is not compatible with NVM.\n"); + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ break; @@ -1387,8 +1381,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) ice_init_pkg_hints(hw, seg); status = ice_download_pkg(hw, seg); if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, - "package previously loaded - no work.\n"); + ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); status = 0; } @@ -3261,8 +3254,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) if (ent->profile_cookie == hdl) return true; - ice_debug(hw, ICE_DBG_INIT, - "Characteristic list for VSI group %d not found.\n", + ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", vsig); return false; } diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c 
index eadc85aee389..89a0cef20506 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -708,57 +708,64 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_prof **prof) { - struct ice_flow_prof_params params; + struct ice_flow_prof_params *params; enum ice_status status; u8 i; if (!prof) return ICE_ERR_BAD_PTR; - memset(¶ms, 0, sizeof(params)); - params.prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params.prof), - GFP_KERNEL); - if (!params.prof) + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) return ICE_ERR_NO_MEMORY; + params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof), + GFP_KERNEL); + if (!params->prof) { + status = ICE_ERR_NO_MEMORY; + goto free_params; + } + /* initialize extraction sequence to all invalid (0xff) */ for (i = 0; i < ICE_MAX_FV_WORDS; i++) { - params.es[i].prot_id = ICE_PROT_INVALID; - params.es[i].off = ICE_FV_OFFSET_INVAL; + params->es[i].prot_id = ICE_PROT_INVALID; + params->es[i].off = ICE_FV_OFFSET_INVAL; } - params.blk = blk; - params.prof->id = prof_id; - params.prof->dir = dir; - params.prof->segs_cnt = segs_cnt; + params->blk = blk; + params->prof->id = prof_id; + params->prof->dir = dir; + params->prof->segs_cnt = segs_cnt; /* Make a copy of the segments that need to be persistent in the flow * profile instance */ for (i = 0; i < segs_cnt; i++) - memcpy(¶ms.prof->segs[i], &segs[i], sizeof(*segs)); + memcpy(¶ms->prof->segs[i], &segs[i], sizeof(*segs)); - status = ice_flow_proc_segs(hw, ¶ms); + status = ice_flow_proc_segs(hw, params); if (status) { - ice_debug(hw, ICE_DBG_FLOW, - "Error processing a flow's packet segments\n"); + ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n"); goto out; } /* Add a HW profile for this flow profile */ - status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es); + status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, + params->es); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; } - INIT_LIST_HEAD(¶ms.prof->entries); - mutex_init(¶ms.prof->entries_lock); - *prof = params.prof; + INIT_LIST_HEAD(¶ms->prof->entries); + mutex_init(¶ms->prof->entries_lock); + *prof = params->prof; out: if (status) - devm_kfree(ice_hw_to_dev(hw), params.prof); + devm_kfree(ice_hw_to_dev(hw), params->prof); +free_params: + kfree(params); return status; } @@ -827,8 +834,7 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, if (!status) set_bit(vsi_handle, prof->vsis); else - ice_debug(hw, ICE_DBG_FLOW, - "HW profile add failed, %d\n", + ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n", status); } @@ -859,8 +865,7 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, if (!status) clear_bit(vsi_handle, prof->vsis); else - ice_debug(hw, ICE_DBG_FLOW, - "HW profile remove failed, %d\n", + ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n", status); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2dea4d0e9415..c52b9bb0e3ab 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -224,7 +224,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc) if (vsi->type != ICE_VSI_PF) return 0; - if (vsi->vlan_ena) { + if (vsi->num_vlan > 1) { status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m, set_promisc); } else { @@ -326,7 +326,7 @@ static 
int ice_vsi_sync_fltr(struct ice_vsi *vsi) /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { if (vsi->current_netdev_flags & IFF_ALLMULTI) { - if (vsi->vlan_ena) + if (vsi->num_vlan > 1) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_MCAST_PROMISC_BITS; @@ -340,7 +340,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) } } else { /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */ - if (vsi->vlan_ena) + if (vsi->num_vlan > 1) promisc_m = ICE_MCAST_VLAN_PROMISC_BITS; else promisc_m = ICE_MCAST_PROMISC_BITS; @@ -667,7 +667,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) speed = "100 M"; break; default: - speed = "Unknown"; + speed = "Unknown "; break; } @@ -3116,10 +3116,8 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, * packets aren't pruned by the device's internal switch on Rx */ ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI); - if (!ret) { - vsi->vlan_ena = true; + if (!ret) set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); - } return ret; } @@ -3158,7 +3156,6 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi)) ret = ice_cfg_vlan_pruning(vsi, false, false); - vsi->vlan_ena = false; set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags); return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 5903a36763de..f729cd0c6224 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -55,7 +55,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, * * Reads a portion of the NVM, as a flat memory space. This function correctly * breaks read requests across Shadow RAM sectors and ensures that no single - * read request exceeds the maximum 4Kb read for a single AdminQ command. + * read request exceeds the maximum 4KB read for a single AdminQ command. * * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. @@ -73,18 +73,17 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) { - ice_debug(hw, ICE_DBG_NVM, - "NVM error: requested offset is beyond Shadow RAM limit\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n"); return ICE_ERR_PARAM; } do { u32 read_size, sector_offset; - /* ice_aq_read_nvm cannot read more than 4Kb at a time. + /* ice_aq_read_nvm cannot read more than 4KB at a time. * Additionally, a read from the Shadow RAM may not cross over * a sector boundary. Conveniently, the sector size is also - * 4Kb. + * 4KB. */ sector_offset = offset % ICE_AQ_MAX_BUF_LEN; read_size = min_t(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, @@ -196,7 +195,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) * Shadow RAM sector restrictions necessary when reading from the NVM. 
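
The helper named in this comment, ice_read_flat_nvm(), enforces two limits per loop iteration: a single AdminQ read may not exceed 4 KB, and a Shadow RAM read may not cross a 4 KB sector boundary; since the sector size equals the AdminQ limit, one min_t() handles both. A sketch of that chunking, with read_chunk() as a hypothetical backend for the AdminQ call:

/* Split a flat NVM read so no request crosses a 4 KB sector. */
#define SECTOR_SIZE     4096u

static int read_flat(u32 offset, u32 len, u8 *data)
{
        while (len) {
                u32 sector_off = offset % SECTOR_SIZE;
                u32 chunk = min_t(u32, SECTOR_SIZE - sector_off, len);

                if (read_chunk(offset, chunk, data))    /* hypothetical I/O */
                        return -EIO;

                offset += chunk;
                data += chunk;
                len -= chunk;
        }
        return 0;
}
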
*/ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, - (u8 *)&data_local, true); + (__force u8 *)&data_local, true); if (status) return status; @@ -397,8 +396,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len, ICE_SR_BOOT_CFG_PTR); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read Boot Configuration Block TLV.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n"); return status; } @@ -406,8 +404,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw) * (Combo Image Version High and Combo Image Version Low) */ if (boot_cfg_tlv_len < 2) { - ice_debug(hw, ICE_DBG_INIT, - "Invalid Boot Configuration Block TLV size.\n"); + ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n"); return ICE_ERR_INVAL_SIZE; } @@ -542,14 +539,12 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) status = ice_read_flat_nvm(hw, offset, &len, &data, false); if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { - ice_debug(hw, ICE_DBG_NVM, - "%s: New upper bound of %u bytes\n", + ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); status = 0; max_size = offset; } else if (!status) { - ice_debug(hw, ICE_DBG_NVM, - "%s: New lower bound of %u bytes\n", + ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n", __func__, offset); min_size = offset; } else { @@ -558,8 +553,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) } } - ice_debug(hw, ICE_DBG_NVM, - "Predicted flash size is %u bytes\n", max_size); + ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size); hw->nvm.flash_size = max_size; @@ -600,15 +594,13 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) } else { /* Blank programming mode */ nvm->blank_nvm_mode = true; - ice_debug(hw, ICE_DBG_NVM, - "NVM init error: unsupported blank mode.\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); return ICE_ERR_NVM_BLANK_MODE; } status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { - ice_debug(hw, ICE_DBG_INIT, - "Failed to read DEV starter version.\n"); + ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n"); return status; } nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; @@ -629,37 +621,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) status = ice_discover_flash_size(hw); if (status) { - ice_debug(hw, ICE_DBG_NVM, - "NVM init error: failed to discover flash size.\n"); + ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n"); return status; } - switch (hw->device_id) { - /* the following devices do not have boot_cfg_tlv yet */ - case ICE_DEV_ID_E823C_BACKPLANE: - case ICE_DEV_ID_E823C_QSFP: - case ICE_DEV_ID_E823C_SFP: - case ICE_DEV_ID_E823C_10G_BASE_T: - case ICE_DEV_ID_E823C_SGMII: - case ICE_DEV_ID_E822C_BACKPLANE: - case ICE_DEV_ID_E822C_QSFP: - case ICE_DEV_ID_E822C_10G_BASE_T: - case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822C_SFP: - case ICE_DEV_ID_E822L_BACKPLANE: - case ICE_DEV_ID_E822L_SFP: - case ICE_DEV_ID_E822L_10G_BASE_T: - case ICE_DEV_ID_E822L_SGMII: - case ICE_DEV_ID_E823L_BACKPLANE: - case ICE_DEV_ID_E823L_SFP: - case ICE_DEV_ID_E823L_10G_BASE_T: - case ICE_DEV_ID_E823L_1GBE: - case ICE_DEV_ID_E823L_QSFP: - return status; - default: - break; - } - status = ice_get_orom_ver_info(hw); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to 
read Option ROM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 44a228530253..f0912e44d4ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -164,8 +164,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, parent = ice_sched_find_node_by_teid(pi->root, le32_to_cpu(info->parent_teid)); if (!parent) { - ice_debug(hw, ICE_DBG_SCHED, - "Parent Node not found for parent_teid=0x%x\n", + ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", le32_to_cpu(info->parent_teid)); return ICE_ERR_PARAM; } @@ -704,8 +703,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "Remove rl profile failed\n"); + ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); /* On error, free mem required */ list_del(&rl_prof_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); @@ -863,8 +861,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, for (i = 0; i < num_nodes; i++) { status = ice_sched_add_node(pi, layer, &buf->generic[i]); if (status) { - ice_debug(hw, ICE_DBG_SCHED, - "add nodes in SW DB failed status =%d\n", + ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", status); break; } @@ -872,8 +869,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, teid = le32_to_cpu(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); if (!new_node) { - ice_debug(hw, ICE_DBG_SCHED, - "Node is missing for teid =%d\n", teid); + ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); break; } @@ -1830,8 +1826,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) continue; if (ice_sched_is_leaf_node_present(vsi_node)) { - ice_debug(pi->hw, ICE_DBG_SCHED, - "VSI has leaf nodes in TC %d\n", i); + ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); status = ICE_ERR_IN_USE; goto exit_sched_rm_vsi_cfg; } @@ -1896,8 +1891,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, &pi->rl_prof_list[ln], list_entry) { if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) - ice_debug(pi->hw, ICE_DBG_SCHED, - "Removed rl profile\n"); + ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n"); } } } @@ -2441,8 +2435,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); if (status && status != ICE_ERR_IN_USE) - ice_debug(pi->hw, ICE_DBG_SCHED, - "Remove rl profile failed\n"); + ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } if (status == ICE_ERR_IN_USE) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index c3a6c41385ee..c33612132ddf 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -537,8 +537,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; break; default: - ice_debug(pi->hw, ICE_DBG_SW, - "incorrect VSI/port type received\n"); + ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); break; } } @@ -1476,8 +1475,7 @@ ice_rem_update_vsi_list(struct ice_hw 
*hw, u16 vsi_handle, tmp_fltr_info.vsi_handle = rem_vsi_handle; status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); if (status) { - ice_debug(hw, ICE_DBG_SW, - "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", + ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", tmp_fltr_info.fwd_id.hw_vsi_id, status); return status; } @@ -1493,8 +1491,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, /* Remove the VSI list since it is no longer used */ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); if (status) { - ice_debug(hw, ICE_DBG_SW, - "Failed to remove VSI list %d, error %d\n", + ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", vsi_list_id, status); return status; } @@ -1853,8 +1850,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) */ if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { - ice_debug(hw, ICE_DBG_SW, - "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); + ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); status = ICE_ERR_CFG; goto exit; } @@ -2740,8 +2736,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) - ice_debug(hw, ICE_DBG_SW, - "counter resource could not be freed\n"); + ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); kfree(buf); return status; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 23eca2f0a03b..a2d0aad8cfdd 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -483,7 +483,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) if (rx_ring->vsi->type == ICE_VSI_PF && !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->q_index)) + rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) goto err; return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 797886524054..39757b4cf8f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -570,12 +570,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) u16 vlan_tag = 0; u8 rx_ptype; - if (cleaned_count >= ICE_RX_BUF_WRITE) { - failure |= ice_alloc_rx_bufs_zc(rx_ring, - cleaned_count); - cleaned_count = 0; - } - rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean); stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S); @@ -642,6 +636,9 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) ice_receive_skb(rx_ring, skb, vlan_tag); } + if (cleaned_count >= ICE_RX_BUF_WRITE) + failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count); + ice_finalize_xdp_rx(rx_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 0d343d050973..03f78fdb0dcd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4361,7 +4361,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, - rx_ring->queue_index) < 0) + rx_ring->queue_index, 0) < 0) goto err; 
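
The xdp_rxq_info_reg() churn across these Intel and Marvell hunks all comes from the same interface change: the registration now takes a fourth argument, the NAPI id of the queue's poll context, which AF_XDP busy polling uses to locate the right NAPI loop. Drivers with a 1:1 queue-to-vector mapping pass q_vector->napi.napi_id; the others (igb, ixgbevf, mvneta, mvpp2 here) pass 0. A sketch of the updated setup sequence; struct my_ring is hypothetical, the two calls are the real API:

static int setup_xdp_rxq(struct my_ring *ring)
{
        int err;

        /* Four-argument form: dev, queue index, then the NAPI id (0 if none). */
        err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                               ring->queue_index, ring->napi->napi_id);
        if (err < 0)
                return err;

        return xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                          MEM_TYPE_PAGE_SHARED, NULL);
}
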
return 0; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index ee9f8c1dca83..30fdea24e94a 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1236,7 +1236,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, spin_lock_bh(&hw->mbx_lock); if (hw->mac.ops.set_vfta(hw, vid, true)) { - dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid); spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } @@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter) /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) - dev_err(&adapter->pdev->dev, "PF still resetting\n"); + dev_warn(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index fd37d2c203af..d0700d48ecf9 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -213,6 +213,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) case IGC_DEV_ID_I220_V: case IGC_DEV_ID_I225_K: case IGC_DEV_ID_I225_K2: + case IGC_DEV_ID_I226_K: case IGC_DEV_ID_I225_LMVP: case IGC_DEV_ID_I225_IT: case IGC_DEV_ID_I226_LM: diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 55dae7c4703f..9da5f83ce456 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -23,6 +23,7 @@ #define IGC_DEV_ID_I225_K 0x3100 #define IGC_DEV_ID_I225_K2 0x3101 #define IGC_DEV_ID_I225_LMVP 0x5502 +#define IGC_DEV_ID_I226_K 0x5504 #define IGC_DEV_ID_I225_IT 0x0D9F #define IGC_DEV_ID_I226_LM 0x125B #define IGC_DEV_ID_I226_V 0x125C diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index b673ac1199bb..afd6a62da29d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -45,6 +45,7 @@ static const struct pci_device_id igc_pci_tbl[] = { { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, + { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f3f449f53920..393d1c2cd853 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6587,7 +6587,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, - rx_ring->queue_index) < 0) + rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0) goto err; rx_ring->xdp_prog = adapter->xdp_prog; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 82fce27f682b..4061cd7db5dd 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3493,7 +3493,7 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, /* XDP RX-queue info */ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, - rx_ring->queue_index) <
0) + rx_ring->queue_index, 0) < 0) goto err; rx_ring->xdp_prog = adapter->xdp_prog; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 4a9041ee1b39..563ceac3060f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, struct netdev_queue *nq, bool napi) { unsigned int bytes_compl = 0, pkts_compl = 0; + struct xdp_frame_bulk bq; int i; + xdp_frame_bulk_init(&bq); + + rcu_read_lock(); /* need for xdp_return_frame_bulk */ + for (i = 0; i < num; i++) { struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; struct mvneta_tx_desc *tx_desc = txq->descs + @@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, if (napi && buf->type == MVNETA_TYPE_XDP_TX) xdp_return_frame_rx_napi(buf->xdpf); else - xdp_return_frame(buf->xdpf); + xdp_return_frame_bulk(buf->xdpf, &bq); } } + xdp_flush_frame_bulk(&bq); + + rcu_read_unlock(); netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); } @@ -2025,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) static void mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, - struct xdp_buff *xdp, int sync_len, bool napi) + struct xdp_buff *xdp, struct skb_shared_info *sinfo, + int sync_len) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; for (i = 0; i < sinfo->nr_frags; i++) page_pool_put_full_page(rxq->page_pool, - skb_frag_page(&sinfo->frags[i]), napi); + skb_frag_page(&sinfo->frags[i]), true); page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - sync_len, napi); + sync_len, true); } static int @@ -2171,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, u32 frame_sz, struct mvneta_stats *stats) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); unsigned int len, data_len, sync; u32 ret, act; @@ -2191,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, err = xdp_do_redirect(pp->dev, xdp, prog); if (unlikely(err)) { - mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); ret = MVNETA_XDP_DROPPED; } else { ret = MVNETA_XDP_REDIR; @@ -2202,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); if (ret != MVNETA_XDP_TX) - mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); break; default: bpf_warn_invalid_xdp_action(act); @@ -2211,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, trace_xdp_exception(pp->dev, prog, act); fallthrough; case XDP_DROP: - mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); + mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync); ret = MVNETA_XDP_DROPPED; stats->xdp_drop++; break; @@ -2269,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, int *size, + struct skb_shared_info *xdp_sinfo, struct page *page) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; int data_len, len; @@ -2289,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, len, dma_dir); rx_desc->buf_phys_addr = 0; - if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { - skb_frag_t *frag = 
&sinfo->frags[sinfo->nr_frags]; + if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) { + skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++]; skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); - sinfo->nr_frags++; + + /* last fragment */ + if (len == *size) { + struct skb_shared_info *sinfo; + + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = xdp_sinfo->nr_frags; + memcpy(sinfo->frags, xdp_sinfo->frags, + sinfo->nr_frags * sizeof(skb_frag_t)); + } } else { page_pool_put_full_page(rxq->page_pool, page, true); } @@ -2339,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi, { int rx_proc = 0, rx_todo, refill, size = 0; struct net_device *dev = pp->dev; - struct xdp_buff xdp_buf = { - .frame_sz = PAGE_SIZE, - .rxq = &rxq->xdp_rxq, - }; + struct skb_shared_info sinfo; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; u32 desc_status, frame_sz; + struct xdp_buff xdp_buf; + + xdp_buf.data_hard_start = NULL; + xdp_buf.frame_sz = PAGE_SIZE; + xdp_buf.rxq = &rxq->xdp_rxq; + + sinfo.nr_frags = 0; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); @@ -2385,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi, rx_desc->buf_phys_addr = 0; page_pool_put_full_page(rxq->page_pool, page, true); - continue; + goto next; } mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, - &size, page); + &size, &sinfo, page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) @@ -2397,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, continue; if (size) { - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); goto next; } @@ -2409,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, if (IS_ERR(skb)) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); u64_stats_update_begin(&stats->syncp); stats->es.skb_alloc_error++; @@ -2426,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi, napi_gro_receive(napi, skb); next: xdp_buf.data_hard_start = NULL; + sinfo.nr_frags = 0; } rcu_read_unlock(); if (xdp_buf.data_hard_start) - mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1); if (ps.xdp_redirect) xdp_do_flush_map(); @@ -3220,7 +3243,7 @@ static int mvneta_create_page_pool(struct mvneta_port *pp, return err; } - err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id); + err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0); if (err < 0) goto err_free_pp; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 834775843067..6bd7e405e830 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -695,6 +695,9 @@ /* Maximum number of supported ports */ #define MVPP2_MAX_PORTS 4 +/* Loopback port index */ +#define MVPP2_LOOPBACK_PORT_INDEX 3 + /* Maximum number of TXQs used by single port */ #define MVPP2_MAX_TXQ 8 @@ -729,22 +732,21 @@ #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) /* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB 0xb000 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 #define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 -#define 
MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size) ((data_size) >> 6) #define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 /* TX FIFO constants */ -#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa -#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 -#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 -#define MVPP2_TX_FIFO_THRESHOLD_10KB \ - (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) -#define MVPP2_TX_FIFO_THRESHOLD_3KB \ - (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP22_TX_FIFO_DATA_SIZE_16KB 16 +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10 +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */ +#define MVPP2_TX_FIFO_THRESHOLD(kb) \ + ((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ @@ -946,6 +948,9 @@ struct mvpp2 { /* List of pointers to port structures */ int port_count; struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + /* Map of enabled ports */ + unsigned long port_map; + struct mvpp2_tai *tai; /* Number of Tx threads used */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index cea886c5bcb5..afdd22827223 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -2440,8 +2440,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { + struct xdp_frame_bulk bq; int i; + xdp_frame_bulk_init(&bq); + + rcu_read_lock(); /* need for xdp_return_frame_bulk */ + for (i = 0; i < num; i++) { struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; @@ -2454,10 +2459,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, dev_kfree_skb_any(tx_buf->skb); else if (tx_buf->type == MVPP2_TYPE_XDP_TX || tx_buf->type == MVPP2_TYPE_XDP_NDO) - xdp_return_frame(tx_buf->xdpf); + xdp_return_frame_bulk(tx_buf->xdpf, &bq); mvpp2_txq_inc_get(txq_pcpu); } + xdp_flush_frame_bulk(&bq); + + rcu_read_unlock(); } static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, @@ -2606,11 +2614,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); if (priv->percpu_pools) { - err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0); if (err < 0) goto err_free_dma; - err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id); + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0); if (err < 0) goto err_unregister_rxq_short; @@ -6602,32 +6610,56 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } -static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size) { - int port; + int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size); - /* The FIFO size parameters are set depending on the maximum speed a - * given port can handle: - * - Port 0: 10Gbps - * - Port 1: 2.5Gbps - * - Ports 2 and 3: 1Gbps - */ + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size); +} - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); +/* Initialize 
RX FIFOs: the total FIFO size is 48kB on PPv2.2. + * 4kB fixed space must be assigned for the loopback port. + * Redistribute remaining available 44kB space among all active ports. + * Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G + * SGMII link. + */ +static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +{ + int remaining_ports_count; + unsigned long port_map; + int size_remainder; + int port, size; + + /* The loopback requires fixed 4kB of the FIFO space assignment. */ + mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); + + /* Set RX FIFO size to 0 for inactive ports. */ + for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) + mvpp22_rx_fifo_set_hw(priv, port, 0); + + /* Assign remaining RX FIFO space among all active ports. */ + size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB; + remaining_ports_count = hweight_long(port_map); + + for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { + if (remaining_ports_count == 1) + size = size_remainder; + else if (port == 0) + size = max(size_remainder / remaining_ports_count, + MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); + else if (port == 1) + size = max(size_remainder / remaining_ports_count, + MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); + else + size = size_remainder / remaining_ports_count; - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); + size_remainder -= size; + remaining_ports_count--; - for (port = 2; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + mvpp22_rx_fifo_set_hw(priv, port, size); } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, @@ -6635,24 +6667,53 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv) -/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G - * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, - * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. +static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size) +{ + int threshold = MVPP2_TX_FIFO_THRESHOLD(size); + + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); + mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold); +} + +/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2. + * 3kB fixed space must be assigned for the loopback port. + * Redistribute remaining available 16kB space among all active ports. + * The 10G interface should use 10kB (which is maximum possible size + * per single port). */ static void mvpp22_tx_fifo_init(struct mvpp2 *priv) { - int port, size, thrs; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - if (port == 0) { + int remaining_ports_count; + unsigned long port_map; + int size_remainder; + int port, size; + + /* The loopback requires fixed 3kB of the FIFO space assignment. */ + mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX, + MVPP22_TX_FIFO_DATA_SIZE_3KB); + port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX); + + /* Set TX FIFO size to 0 for inactive ports. */ + for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) + mvpp22_tx_fifo_set_hw(priv, port, 0); + + /* Assign remaining TX FIFO space among all active ports. */ + size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB; + remaining_ports_count = hweight_long(port_map); + + for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) { + if (remaining_ports_count == 1) + size = min(size_remainder, + MVPP22_TX_FIFO_DATA_SIZE_10KB); + else if (port == 0) size = MVPP22_TX_FIFO_DATA_SIZE_10KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; - } else { - size = MVPP22_TX_FIFO_DATA_SIZE_3KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; - } - mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); - mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); + else + size = size_remainder / remaining_ports_count; + + size_remainder -= size; + remaining_ports_count--; + + mvpp22_tx_fifo_set_hw(priv, port, size); } }
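To make the redistribution logic above concrete, here is the RX loop worked through for the case where all three non-loopback ports are active (a worked example in kB with integer division; this annotation is not part of the patch):

/* size_remainder = 44, remaining_ports_count = 3
 * port 0: max(44 / 3, 32) = 32 -> 12 kB left, 2 ports to go
 * port 1: max(12 / 2, 8)  =  8 ->  4 kB left, 1 port to go
 * port 2: last active port, takes the remaining 4 kB
 * total: 32 + 8 + 4 = 44 kB, and both minimum guarantees hold
 */

The TX loop hands out its 16kB pool the same way, except that the last port served is additionally clamped to the 10kB per-port maximum by the min() in mvpp22_tx_fifo_init() above.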
@@ -6953,6 +7014,12 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_axi_clk; } + /* Map DTS-active ports. Should be done before the FIFO configuration in mvpp2_init() */ + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (!fwnode_property_read_u32(port_fwnode, "port-id", &i)) + priv->port_map |= BIT(i); + } + /* Initialize network controller */ err = mvpp2_init(pdev, priv); if (err < 0) { diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 543a1d047567..16caa02095fe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -9,6 +9,7 @@ config OCTEONTX2_MBOX config OCTEONTX2_AF tristate "Marvell OcteonTX2 RVU Admin Function driver" select OCTEONTX2_MBOX + select NET_DEVLINK depends on (64BIT && COMPILE_TEST) || ARM64 depends on PCI help diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 2f7a861d0c7b..eb535c98ca38 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o octeontx2_mbox-y := mbox.o rvu_trace.o octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ - rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o + rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ + rvu_cpt.o rvu_devlink.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 8f17e26dca53..7d0f96290943 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -145,6 +145,16 @@ int cgx_get_cgxid(void *cgxd) return cgx->cgx_id; } +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + u64 cfg; + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG); + + return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API.
Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -814,8 +824,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx) minor_ver = FIELD_GET(RESP_MINOR_VER, resp); dev_dbg(dev, "Firmware command interface version = %d.%d\n", major_ver, minor_ver); - if (major_ver != CGX_FIRMWARE_MAJOR_VER || - minor_ver != CGX_FIRMWARE_MINOR_VER) + if (major_ver != CGX_FIRMWARE_MAJOR_VER) return -EIO; else return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 27ca3291682b..bcfc3e5f66bb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -27,6 +27,10 @@ /* Registers */ #define CGXX_CMRX_CFG 0x00 +#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59) +#define CMR_P2X_SEL_SHIFT 59ULL +#define CMR_P2X_SEL_NIX0 1ULL +#define CMR_P2X_SEL_NIX1 2ULL #define CMR_EN BIT_ULL(55) #define DATA_PKT_TX_EN BIT_ULL(53) #define DATA_PKT_RX_EN BIT_ULL(54) @@ -142,5 +146,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause); void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable); +u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index f48eb66ed021..17f6f42f4453 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -162,6 +162,8 @@ enum nix_scheduler { #define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull) #define NIX_RX_ACTIONOP_MCAST (0x3ull) #define NIX_RX_ACTIONOP_RSS (0x4ull) +/* Use the RX action set in the default unicast entry */ +#define NIX_RX_ACTION_DEFAULT (0xfull) /* NIX TX action operation*/ #define NIX_TX_ACTIONOP_DROP (0x0ull) @@ -174,8 +176,12 @@ enum nix_scheduler { #define NPC_MCAM_KEY_X2 1 #define NPC_MCAM_KEY_X4 2 -#define NIX_INTF_RX 0 -#define NIX_INTF_TX 1 +#define NIX_INTFX_RX(a) (0x0ull | (a) << 1) +#define NIX_INTFX_TX(a) (0x1ull | (a) << 1) + +/* Default interfaces are NIX0_RX and NIX0_TX */ +#define NIX_INTF_RX NIX_INTFX_RX(0) +#define NIX_INTF_TX NIX_INTFX_TX(0) #define NIX_INTF_TYPE_CGX 0 #define NIX_INTF_TYPE_LBK 1 @@ -206,6 +212,8 @@ enum ndc_idx_e { NIX0_RX = 0x0, NIX0_TX = 0x1, NPA0_U = 0x2, + NIX1_RX = 0x4, + NIX1_TX = 0x5, }; enum ndc_ctype_e { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 263a21129416..f919283ddc34 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -86,7 +86,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0001) +#define OTX2_MBOX_VERSION (0x0007) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -158,6 +158,11 @@ M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ +M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \ + msg_rsp) \ +M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ +M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ + 
cpt_rd_wr_reg_msg) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ npc_mcam_alloc_entry_rsp) \ @@ -188,10 +193,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \ npc_mcam_alloc_and_write_entry_rsp) \ M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \ msg_req, npc_get_kex_cfg_rsp) \ +M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \ + npc_install_flow_req, npc_install_flow_rsp) \ +M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \ + npc_delete_flow_req, msg_rsp) \ +M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ + npc_mcam_read_entry_req, \ + npc_mcam_read_entry_rsp) \ +M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ + msg_req, npc_mcam_read_base_rule_rsp) \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ nix_lf_alloc_req, nix_lf_alloc_rsp) \ -M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \ +M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \ M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ hwctx_disable_req, msg_rsp) \ @@ -200,7 +214,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \ -M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \ +M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \ + nix_vtag_config_rsp) \ M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \ nix_rss_flowkey_cfg, \ nix_rss_flowkey_cfg_rsp) \ @@ -216,7 +231,6 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \ nix_lso_format_cfg, \ nix_lso_format_cfg_rsp) \ -M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \ M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \ M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ @@ -271,6 +285,17 @@ struct ready_msg_rsp { * or to detach part of a certain resource type. * Rest of the fields specify how many of what type to * be attached. + * To request LFs from two blocks of the same type, this mailbox + * can be sent twice as below: + * struct rsrc_attach *attach; + * .. Allocate memory for message .. + * attach->cptlfs = 3; <3 LFs from CPT0> + * .. Send message .. + * .. Allocate memory for message .. + * attach->modify = 1; + * attach->cpt_blkaddr = BLKADDR_CPT1; + * attach->cptlfs = 2; <2 LFs from CPT1> + * .. Send message .. */ struct rsrc_attach { struct mbox_msghdr hdr; @@ -281,6 +306,7 @@ struct rsrc_attach { u16 ssow; u16 timlfs; u16 cptlfs; + int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */ }; /* Structure for relinquishing resources. 
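The usage note added to struct rsrc_attach above is the core of the multi-block change: LFs of one type can now be drawn from two blocks of the same type with two requests. Below is a minimal requester-side sketch of that flow; mbox_alloc_msg() and mbox_send() are placeholder names for whatever mailbox wrappers the requesting driver really uses, and only the field assignments are taken from the comment:

	struct rsrc_attach *attach;

	/* First request: three LFs from CPT0 (cpt_blkaddr == 0 selects CPT0). */
	attach = mbox_alloc_msg(mbox);		/* placeholder helper */
	attach->cptlfs = 3;
	mbox_send(mbox);			/* placeholder helper */

	/* Second request: modify = 1 keeps the LFs already attached while two
	 * more LFs are pulled from the second block via cpt_blkaddr.
	 */
	attach = mbox_alloc_msg(mbox);
	attach->modify = 1;
	attach->cpt_blkaddr = BLKADDR_CPT1;
	attach->cptlfs = 2;
	mbox_send(mbox);

This works because the AF-side attach handler (see the rvu.c hunks later in this section) detaches existing CPT LFs on a modify request only when the new request draws from the same block.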
@@ -314,6 +340,8 @@ struct msix_offset_rsp { u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; + u8 cpt1_lfs; + u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT]; }; struct get_hw_cap_rsp { @@ -459,6 +487,20 @@ enum nix_af_status { NIX_AF_ERR_LSO_CFG_FAIL = -418, NIX_AF_INVAL_NPA_PF_FUNC = -419, NIX_AF_INVAL_SSO_PF_FUNC = -420, + NIX_AF_ERR_TX_VTAG_NOSPC = -421, + NIX_AF_ERR_RX_VTAG_INUSE = -422, +}; + +/* For NIX RX vtag action */ +enum nix_rx_vtag0_type { + NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */ + NIX_AF_LFX_RX_VTAG_TYPE1, + NIX_AF_LFX_RX_VTAG_TYPE2, + NIX_AF_LFX_RX_VTAG_TYPE3, + NIX_AF_LFX_RX_VTAG_TYPE4, + NIX_AF_LFX_RX_VTAG_TYPE5, + NIX_AF_LFX_RX_VTAG_TYPE6, + NIX_AF_LFX_RX_VTAG_TYPE7, }; /* For NIX LF context alloc and init */ @@ -491,6 +533,16 @@ struct nix_lf_alloc_rsp { u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */ u16 cints; /* NIX_AF_CONST2::CINTS */ u16 qints; /* NIX_AF_CONST2::QINTS */ + u8 cgx_links; /* No. of CGX links present in HW */ + u8 lbk_links; /* No. of LBK links present in HW */ + u8 sdp_links; /* No. of SDP links present in HW */ +}; + +struct nix_lf_free_req { + struct mbox_msghdr hdr; +#define NIX_LF_DISABLE_FLOWS BIT_ULL(0) +#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1) + u64 flags; }; /* NIX AQ enqueue msg */ @@ -583,14 +635,40 @@ struct nix_vtag_config { union { /* valid when cfg_type is '0' */ struct { - /* tx vlan0 tag(C-VLAN) */ - u64 vlan0; - /* tx vlan1 tag(S-VLAN) */ - u64 vlan1; - /* insert tx vlan tag */ - u8 insert_vlan :1; - /* insert tx double vlan tag */ - u8 double_vlan :1; + u64 vtag0; + u64 vtag1; + + /* cfg_vtag0 & cfg_vtag1 fields are valid + * when free_vtag0 & free_vtag1 are '0's. + */ + /* cfg_vtag0 = 1 to configure vtag0 */ + u8 cfg_vtag0 :1; + /* cfg_vtag1 = 1 to configure vtag1 */ + u8 cfg_vtag1 :1; + + /* vtag0_idx & vtag1_idx are only valid when + * both cfg_vtag0 & cfg_vtag1 are '0's, + * these fields are used along with free_vtag0 + * & free_vtag1 to free the nix lf's tx_vlan + * configuration. + * + * Denotes the indices of tx_vtag def registers + * that needs to be cleared and freed. + */ + int vtag0_idx; + int vtag1_idx; + + /* free_vtag0 & free_vtag1 fields are valid + * when cfg_vtag0 & cfg_vtag1 are '0's. + */ + /* free_vtag0 = 1 clears vtag0 configuration + * vtag0_idx denotes the index to be cleared. + */ + u8 free_vtag0 :1; + /* free_vtag1 = 1 clears vtag1 configuration + * vtag1_idx denotes the index to be cleared. + */ + u8 free_vtag1 :1; } tx; /* valid when cfg_type is '1' */ @@ -605,6 +683,17 @@ struct nix_vtag_config { }; }; +struct nix_vtag_config_rsp { + struct mbox_msghdr hdr; + int vtag0_idx; + int vtag1_idx; + /* Indices of tx_vtag def registers used to configure + * tx vtag0 & vtag1 headers, these indices are valid + * when nix_vtag_config mbox requested for vtag0 and/ + * or vtag1 configuration. 
+ */ +}; + struct nix_rss_flowkey_cfg { struct mbox_msghdr hdr; int mcam_index; /* MCAM entry index to modify */ @@ -627,6 +716,7 @@ struct nix_rss_flowkey_cfg { #define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16) #define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17) #define NIX_FLOW_KEY_TYPE_VLAN BIT(20) +#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21) u32 flowkey_cfg; /* Flowkey types selected */ u8 group; /* RSS context or group */ }; @@ -865,6 +955,87 @@ struct npc_get_kex_cfg_rsp { u8 mkex_pfl_name[MKEX_NAME_LEN]; }; +struct flow_msg { + unsigned char dmac[6]; + unsigned char smac[6]; + __be16 etype; + __be16 vlan_etype; + __be16 vlan_tci; + union { + __be32 ip4src; + __be32 ip6src[4]; + }; + union { + __be32 ip4dst; + __be32 ip6dst[4]; + }; + u8 tos; + u8 ip_ver; + u8 ip_proto; + u8 tc; + __be16 sport; + __be16 dport; +}; + +struct npc_install_flow_req { + struct mbox_msghdr hdr; + struct flow_msg packet; + struct flow_msg mask; + u64 features; + u16 entry; + u16 channel; + u8 intf; + u8 set_cntr; /* If counter is available set counter for this entry ? */ + u8 default_rule; + u8 append; /* overwrite(0) or append(1) flow to default rule? */ + u16 vf; + /* action */ + u32 index; + u16 match_id; + u8 flow_key_alg; + u8 op; + /* vtag rx action */ + u8 vtag0_type; + u8 vtag0_valid; + u8 vtag1_type; + u8 vtag1_valid; + /* vtag tx action */ + u16 vtag0_def; + u8 vtag0_op; + u16 vtag1_def; + u8 vtag1_op; +}; + +struct npc_install_flow_rsp { + struct mbox_msghdr hdr; + int counter; /* negative if no counter else counter number */ +}; + +struct npc_delete_flow_req { + struct mbox_msghdr hdr; + u16 entry; + u16 start;/*Disable range of entries */ + u16 end; + u8 all; /* PF + VFs */ +}; + +struct npc_mcam_read_entry_req { + struct mbox_msghdr hdr; + u16 entry; /* MCAM entry to read */ +}; + +struct npc_mcam_read_entry_rsp { + struct mbox_msghdr hdr; + struct mcam_entry entry_data; + u8 intf; + u8 enable; +}; + +struct npc_mcam_read_base_rule_rsp { + struct mbox_msghdr hdr; + struct mcam_entry entry; +}; + enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, @@ -881,4 +1052,32 @@ struct ptp_rsp { u64 clk; }; +/* CPT mailbox error codes + * Range 901 - 1000. + */ +enum cpt_af_status { + CPT_AF_ERR_PARAM = -901, + CPT_AF_ERR_GRP_INVALID = -902, + CPT_AF_ERR_LF_INVALID = -903, + CPT_AF_ERR_ACCESS_DENIED = -904, + CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905, + CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906 +}; + +/* CPT mbox message formats */ +struct cpt_rd_wr_reg_msg { + struct mbox_msghdr hdr; + u64 reg_offset; + u64 *ret_val; + u64 val; + u8 is_write; +}; + +struct cpt_lf_alloc_req_msg { + struct mbox_msghdr hdr; + u16 nix_pf_func; + u16 sso_pf_func; + u16 eng_grpmsk; +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 91a9d00e4fb5..a1f79445db71 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -140,6 +140,63 @@ enum npc_kpu_lh_ltype { NPC_LT_LH_CUSTOM1 = 0xF, }; +/* NPC port kind defines how the incoming or outgoing packets + * are processed. NPC accepts packets from up to 64 pkinds. + * Software assigns pkind for each incoming port such as CGX + * Ethernet interfaces, LBK interfaces, etc. + */ +enum npc_pkind_type { + NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */ +}; + +/* list of known and supported fields in packet header and + * fields present in key structure. 
+ */ +enum key_fields { + NPC_DMAC, + NPC_SMAC, + NPC_ETYPE, + NPC_OUTER_VID, + NPC_TOS, + NPC_SIP_IPV4, + NPC_DIP_IPV4, + NPC_SIP_IPV6, + NPC_DIP_IPV6, + NPC_SPORT_TCP, + NPC_DPORT_TCP, + NPC_SPORT_UDP, + NPC_DPORT_UDP, + NPC_SPORT_SCTP, + NPC_DPORT_SCTP, + NPC_HEADER_FIELDS_MAX, + NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */ + NPC_PF_FUNC, /* Valid when Tx */ + NPC_ERRLEV, + NPC_ERRCODE, + NPC_LXMB, + NPC_LA, + NPC_LB, + NPC_LC, + NPC_LD, + NPC_LE, + NPC_LF, + NPC_LG, + NPC_LH, + /* Ethertype for untagged frame */ + NPC_ETYPE_ETHER, + /* Ethertype for single tagged frame */ + NPC_ETYPE_TAG1, + /* Ethertype for double tagged frame */ + NPC_ETYPE_TAG2, + /* outer vlan tci for single tagged frame */ + NPC_VLAN_TAG1, + /* outer vlan tci for double tagged frame */ + NPC_VLAN_TAG2, + /* other header fields programmed to extract but not of our interest */ + NPC_UNKNOWN, + NPC_KEY_FIELDS_MAX, +}; + struct npc_kpu_profile_cam { u8 state; u8 state_mask; @@ -300,11 +357,63 @@ struct nix_rx_action { /* NPC_AF_INTFX_KEX_CFG field masks */ #define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0) +/* NPC_PARSE_KEX_S nibble definitions for each field */ +#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0) +#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3) +#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4) +#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6) +#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7) +#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9) +#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10) +#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12) +#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13) +#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15) +#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16) +#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18) +#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19) +#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21) +#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22) +#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24) +#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25) +#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27) +#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28) +#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30) + +struct nix_tx_action { +#if defined(__BIG_ENDIAN_BITFIELD) + u64 rsvd_63_48 :16; + u64 match_id :16; + u64 index :20; + u64 rsvd_11_8 :8; + u64 op :4; +#else + u64 op :4; + u64 rsvd_11_8 :8; + u64 index :20; + u64 match_id :16; + u64 rsvd_63_48 :16; +#endif +}; + /* NIX Receive Vtag Action Structure */ -#define VTAG0_VALID_BIT BIT_ULL(15) -#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12) -#define VTAG0_LID_MASK GENMASK_ULL(10, 8) -#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0) +#define RX_VTAG0_VALID_BIT BIT_ULL(15) +#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12) +#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8) +#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0) +#define RX_VTAG1_VALID_BIT BIT_ULL(47) +#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44) +#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40) +#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32) + +/* NIX Transmit Vtag Action Structure */ +#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16) +#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12) +#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8) +#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0) +#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48) +#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44) +#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40) +#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32) struct npc_mcam_kex { /* MKEX Profle Header */ @@ -357,4 +466,24 @@ struct npc_lt_def_cfg { struct npc_lt_def pck_iip4; }; 
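The RX_VTAG0/1 and TX_VTAG0/1 masks above replace the old unprefixed VTAG0_* names and add fields for a second tag. As a rough illustration of how a 64-bit TX vtag action word can be packed from these masks (the helper below is not part of the patch; FIELD_PREP() is the standard accessor from <linux/bitfield.h>):

	#include <linux/bitfield.h>

	/* Illustrative only: pack the VTAG0 half of a NIX TX vtag action.
	 * 'def' is the tx_vtag definition index returned in nix_vtag_config_rsp,
	 * 'op' the vtag opcode, 'lid' the layer id and 'relptr' the byte offset
	 * within that layer.
	 */
	static u64 nix_tx_vtag0_action(u16 def, u8 op, u8 lid, u8 relptr)
	{
		return FIELD_PREP(TX_VTAG0_DEF_MASK, def) |
		       FIELD_PREP(TX_VTAG0_OP_MASK, op) |
		       FIELD_PREP(TX_VTAG0_LID_MASK, lid) |
		       FIELD_PREP(TX_VTAG0_RELPTR_MASK, relptr);
	}

The VTAG1 variant is the same with the TX_VTAG1_* masks, which occupy the upper half of the same action word.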
+struct rvu_npc_mcam_rule { + struct flow_msg packet; + struct flow_msg mask; + u8 intf; + union { + struct nix_tx_action tx_action; + struct nix_rx_action rx_action; + }; + u64 vtag_action; + struct list_head list; + u64 features; + u16 owner; + u16 entry; + u16 cntr; + bool has_cntr; + u8 default_rule; + bool enable; + bool vfvlan_cfg; +}; + #endif /* NPC_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 77bb4ed32600..b192692b4fc4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -148,6 +148,20 @@ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \ ((flags_ena) << 6) | ((key_ofs) & 0x3F)) +/* Rx parse key extract nibble enable */ +#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \ + NPC_PARSE_NIBBLE_LA_LTYPE | \ + NPC_PARSE_NIBBLE_LB_LTYPE | \ + NPC_PARSE_NIBBLE_LC_LTYPE | \ + NPC_PARSE_NIBBLE_LD_LTYPE | \ + NPC_PARSE_NIBBLE_LE_LTYPE) +/* Tx parse key extract nibble enable */ +#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \ + NPC_PARSE_NIBBLE_LB_LTYPE | \ + NPC_PARSE_NIBBLE_LC_LTYPE | \ + NPC_PARSE_NIBBLE_LD_LTYPE | \ + NPC_PARSE_NIBBLE_LE_LTYPE) + enum npc_kpu_parser_state { NPC_S_NA = 0, NPC_S_KPU1_ETHER, @@ -13380,14 +13394,15 @@ static const struct npc_lt_def_cfg npc_lt_defaults = { }, }; -static const struct npc_mcam_kex npc_mkex_default = { +static struct npc_mcam_kex npc_mkex_default = { .mkex_sign = MKEX_SIGN, .name = "default", .kpu_version = NPC_KPU_PROFILE_VER, .keyx_cfg = { - /* nibble: LA..LE (ltype only) + Channel */ - [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247, - [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1), + /* nibble: LA..LE (ltype only) + channel */ + [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX, + /* nibble: LA..LE (ltype only) */ + [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX, }, .intf_lid_lt_ld = { /* Default RX MCAM KEX profile */ @@ -13405,12 +13420,14 @@ static const struct npc_mcam_kex npc_mkex_default = { /* Layer B: Single VLAN (CTAG) */ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ [NPC_LT_LB_CTAG] = { - KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4), + KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4), }, /* Layer B: Stacked VLAN (STAG|QinQ) */ [NPC_LT_LB_STAG_QINQ] = { - /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ - KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4), + /* Outer VLAN: 2 bytes, KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* Ethertype: 2 bytes, KW0[47:32] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4), }, [NPC_LT_LB_FDSA] = { /* SWITCH PORT: 1 byte, KW0[63:48] */ @@ -13436,17 +13453,71 @@ static const struct npc_mcam_kex npc_mkex_default = { [NPC_LID_LD] = { /* Layer D:UDP */ [NPC_LT_LD_UDP] = { - /* SPORT: 2 bytes, KW3[15:0] */ - KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18), - /* DPORT: 2 bytes, KW3[31:16] */ - KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a), + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + /* Layer D:TCP */ + [NPC_LT_LD_TCP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), + }, + }, + }, + + /* Default TX MCAM KEX profile */ + [NIX_INTF_TX] = { + [NPC_LID_LA] = { + /* Layer A: NIX_INST_HDR_S + Ethernet */ + /* NIX appends 8 bytes of NIX_INST_HDR_S at the + * start of each TX packet supplied to NPC. 
+ */ + [NPC_LT_LA_IH_NIX_ETHER] = { + /* PF_FUNC: 2B , KW0 [47:32] */ + KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4), + /* DMAC: 6 bytes, KW1[63:16] */ + KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa), + }, + }, + [NPC_LID_LB] = { + /* Layer B: Single VLAN (CTAG) */ + [NPC_LT_LB_CTAG] = { + /* CTAG VLAN[2..3] KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* CTAG VLAN[2..3] KW1[15:0] */ + KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8), + }, + /* Layer B: Stacked VLAN (STAG|QinQ) */ + [NPC_LT_LB_STAG_QINQ] = { + /* Outer VLAN: 2 bytes, KW0[63:48] */ + KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6), + /* Outer VLAN: 2 Bytes, KW1[15:0] */ + KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8), + }, + }, + [NPC_LID_LC] = { + /* Layer C: IPv4 */ + [NPC_LT_LC_IP] = { + /* SIP+DIP: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10), + /* TOS: 1 byte, KW1[63:56] */ + KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf), + }, + /* Layer C: IPv6 */ + [NPC_LT_LC_IP6] = { + /* Everything up to SADDR: 8 bytes, KW2[63:0] */ + KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10), + }, + }, + [NPC_LID_LD] = { + /* Layer D:UDP */ + [NPC_LT_LD_UDP] = { + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), }, /* Layer D:TCP */ [NPC_LT_LD_TCP] = { - /* SPORT: 2 bytes, KW3[15:0] */ - KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18), - /* DPORT: 2 bytes, KW3[31:16] */ - KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a), + /* SPORT+DPORT: 4 bytes, KW3[31:0] */ + KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18), }, }, }, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index e1f918960730..e8fd712860a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -66,6 +66,7 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu) hw->cap.nix_shaping = true; hw->cap.nix_tx_link_bp = true; hw->cap.nix_rx_multicast = true; + hw->rvu = rvu; if (is_rvu_96xx_B0(rvu)) { hw->cap.nix_fixed_txschq_mapping = true; @@ -210,6 +211,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) * multiple blocks of same type. * * @pcifunc has to be zero when no LF is yet attached. + * + * For a pcifunc if LFs are attached from multiple blocks of same type, then + * return blkaddr of first encountered block. */ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) { @@ -258,20 +262,39 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc) devnum = rvu_get_pf(pcifunc); } - /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */ + /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or + * 'BLKADDR_NIX1'. + */ if (blktype == BLKTYPE_NIX) { - reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG; + reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) : + RVU_PRIV_HWVFX_NIXX_CFG(0); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); - if (cfg) + if (cfg) { blkaddr = BLKADDR_NIX0; + goto exit; + } + + reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) : + RVU_PRIV_HWVFX_NIXX_CFG(1); + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_NIX1; } - /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */ if (blktype == BLKTYPE_CPT) { - reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG; + reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) : + RVU_PRIV_HWVFX_CPTX_CFG(0); cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); - if (cfg) + if (cfg) { blkaddr = BLKADDR_CPT0; + goto exit; + } + + reg = is_pf ? 
RVU_PRIV_PFX_CPTX_CFG(1) : + RVU_PRIV_HWVFX_CPTX_CFG(1); + cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16)); + if (cfg) + blkaddr = BLKADDR_CPT1; } exit: @@ -306,31 +329,36 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf, block->fn_map[lf] = attach ? pcifunc : 0; - switch (block->type) { - case BLKTYPE_NPA: + switch (block->addr) { + case BLKADDR_NPA: pfvf->npalf = attach ? true : false; num_lfs = pfvf->npalf; break; - case BLKTYPE_NIX: + case BLKADDR_NIX0: + case BLKADDR_NIX1: pfvf->nixlf = attach ? true : false; num_lfs = pfvf->nixlf; break; - case BLKTYPE_SSO: + case BLKADDR_SSO: attach ? pfvf->sso++ : pfvf->sso--; num_lfs = pfvf->sso; break; - case BLKTYPE_SSOW: + case BLKADDR_SSOW: attach ? pfvf->ssow++ : pfvf->ssow--; num_lfs = pfvf->ssow; break; - case BLKTYPE_TIM: + case BLKADDR_TIM: attach ? pfvf->timlfs++ : pfvf->timlfs--; num_lfs = pfvf->timlfs; break; - case BLKTYPE_CPT: + case BLKADDR_CPT0: attach ? pfvf->cptlfs++ : pfvf->cptlfs--; num_lfs = pfvf->cptlfs; break; + case BLKADDR_CPT1: + attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--; + num_lfs = pfvf->cpt1_lfs; + break; } reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg; @@ -466,12 +494,16 @@ static void rvu_reset_all_blocks(struct rvu *rvu) /* Do a HW reset of all RVU blocks */ rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST); + rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST); rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST); } @@ -695,6 +727,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu) u64 *mac; for (pf = 0; pf < hw->total_pfs; pf++) { + /* For PF0(AF), Assign MAC address to only VFs (LBKVFs) */ + if (!pf) + goto lbkvf; + if (!is_pf_cgxmapped(rvu, pf)) continue; /* Assign MAC address to PF */ @@ -708,8 +744,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu) } else { eth_random_addr(pfvf->mac_addr); } + ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); - /* Assign MAC address to VFs */ +lbkvf: + /* Assign MAC address to VFs*/ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); for (vf = 0; vf < numvfs; vf++, hwvf++) { pfvf = &rvu->hwvf[hwvf]; @@ -722,6 +760,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu) } else { eth_random_addr(pfvf->mac_addr); } + ether_addr_copy(pfvf->default_mac, pfvf->mac_addr); } } } @@ -757,6 +796,62 @@ static void rvu_fwdata_exit(struct rvu *rvu) iounmap(rvu->fwdata); } +static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* Init NIX LF's bitmap */ + block = &hw->block[blkaddr]; + if (!block->implemented) + return 0; + blkid = (blkaddr == BLKADDR_NIX0) ? 
0 : 1; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + block->lf.max = cfg & 0xFFF; + block->addr = blkaddr; + block->type = BLKTYPE_NIX; + block->lfshift = 8; + block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid); + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid); + block->lfcfg_reg = NIX_PRIV_LFX_CFG; + block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; + block->lfreset_reg = NIX_AF_LF_RST; + sprintf(block->name, "NIX%d", blkid); + rvu->nix_blkaddr[blkid] = blkaddr; + return rvu_alloc_bitmap(&block->lf); +} + +static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkid; + u64 cfg; + + /* Init CPT LF's bitmap */ + block = &hw->block[blkaddr]; + if (!block->implemented) + return 0; + blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1; + cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0); + block->lf.max = cfg & 0xFF; + block->addr = blkaddr; + block->type = BLKTYPE_CPT; + block->multislot = true; + block->lfshift = 3; + block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; + block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid); + block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid); + block->lfcfg_reg = CPT_PRIV_LFX_CFG; + block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; + block->lfreset_reg = CPT_AF_LF_RST; + sprintf(block->name, "CPT%d", blkid); + return rvu_alloc_bitmap(&block->lf); +} + static int rvu_setup_hw_resources(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; @@ -791,27 +886,13 @@ static int rvu_setup_hw_resources(struct rvu *rvu) return err; nix: - /* Init NIX LF's bitmap */ - block = &hw->block[BLKADDR_NIX0]; - if (!block->implemented) - goto sso; - cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2); - block->lf.max = cfg & 0xFFF; - block->addr = BLKADDR_NIX0; - block->type = BLKTYPE_NIX; - block->lfshift = 8; - block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG; - block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG; - block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG; - block->lfcfg_reg = NIX_PRIV_LFX_CFG; - block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG; - block->lfreset_reg = NIX_AF_LF_RST; - sprintf(block->name, "NIX"); - err = rvu_alloc_bitmap(&block->lf); + err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0); + if (err) + return err; + err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1); if (err) return err; -sso: /* Init SSO group's bitmap */ block = &hw->block[BLKADDR_SSO]; if (!block->implemented) @@ -877,28 +958,13 @@ tim: return err; cpt: - /* Init CPT LF's bitmap */ - block = &hw->block[BLKADDR_CPT0]; - if (!block->implemented) - goto init; - cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0); - block->lf.max = cfg & 0xFF; - block->addr = BLKADDR_CPT0; - block->type = BLKTYPE_CPT; - block->multislot = true; - block->lfshift = 3; - block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG; - block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG; - block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG; - block->lfcfg_reg = CPT_PRIV_LFX_CFG; - block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG; - block->lfreset_reg = CPT_AF_LF_RST; - sprintf(block->name, "CPT"); - err = rvu_alloc_bitmap(&block->lf); + err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0); + if (err) + return err; + err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1); if (err) return err; -init: /* Allocate memory for PFVF data */ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, sizeof(struct rvu_pfvf), GFP_KERNEL); @@ -1025,7 +1091,30 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, /* Get current count of a RVU block's LF/slots * provisioned 
to a given RVU func. */ -static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr) +{ + switch (blkaddr) { + case BLKADDR_NPA: + return pfvf->npalf ? 1 : 0; + case BLKADDR_NIX0: + case BLKADDR_NIX1: + return pfvf->nixlf ? 1 : 0; + case BLKADDR_SSO: + return pfvf->sso; + case BLKADDR_SSOW: + return pfvf->ssow; + case BLKADDR_TIM: + return pfvf->timlfs; + case BLKADDR_CPT0: + return pfvf->cptlfs; + case BLKADDR_CPT1: + return pfvf->cpt1_lfs; + } + return 0; +} + +/* Return true if LFs of block type are attached to pcifunc */ +static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype) { switch (blktype) { case BLKTYPE_NPA: @@ -1033,15 +1122,16 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) case BLKTYPE_NIX: return pfvf->nixlf ? 1 : 0; case BLKTYPE_SSO: - return pfvf->sso; + return !!pfvf->sso; case BLKTYPE_SSOW: - return pfvf->ssow; + return !!pfvf->ssow; case BLKTYPE_TIM: - return pfvf->timlfs; + return !!pfvf->timlfs; case BLKTYPE_CPT: - return pfvf->cptlfs; + return pfvf->cptlfs || pfvf->cpt1_lfs; } - return 0; + + return false; } bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) @@ -1054,7 +1144,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) pfvf = rvu_get_pfvf(rvu, pcifunc); /* Check if this PFFUNC has a LF of type blktype attached */ - if (!rvu_get_rsrc_mapcount(pfvf, blktype)) + if (!is_blktype_attached(pfvf, blktype)) return false; return true; @@ -1093,9 +1183,12 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) if (blkaddr < 0) return; + if (blktype == BLKTYPE_NIX) + rvu_nix_reset_mac(pfvf, pcifunc); + block = &hw->block[blkaddr]; - num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type); + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); if (!num_lfs) return; @@ -1146,6 +1239,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, continue; else if ((blkid == BLKADDR_NIX0) && !detach->nixlf) continue; + else if ((blkid == BLKADDR_NIX1) && !detach->nixlf) + continue; else if ((blkid == BLKADDR_SSO) && !detach->sso) continue; else if ((blkid == BLKADDR_SSOW) && !detach->ssow) @@ -1154,6 +1249,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, continue; else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs) continue; + else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs) + continue; } rvu_detach_block(rvu, pcifunc, block->type); } @@ -1169,8 +1266,73 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } -static void rvu_attach_block(struct rvu *rvu, int pcifunc, - int blktype, int num_lfs) +static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr = BLKADDR_NIX0, vf; + struct rvu_pfvf *pf; + + /* All CGX mapped PFs are set with assigned NIX block during init */ + if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + blkaddr = pf->nix_blkaddr; + } else if (is_afvf(pcifunc)) { + vf = pcifunc - 1; + /* Assign NIX based on VF number. All even numbered VFs get + * NIX0 and odd numbered gets NIX1 + */ + blkaddr = (vf & 1) ? 
BLKADDR_NIX1 : BLKADDR_NIX0; + /* NIX1 is not present on all silicons */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + blkaddr = BLKADDR_NIX0; + } + + switch (blkaddr) { + case BLKADDR_NIX1: + pfvf->nix_blkaddr = BLKADDR_NIX1; + pfvf->nix_rx_intf = NIX_INTFX_RX(1); + pfvf->nix_tx_intf = NIX_INTFX_TX(1); + break; + case BLKADDR_NIX0: + default: + pfvf->nix_blkaddr = BLKADDR_NIX0; + pfvf->nix_rx_intf = NIX_INTFX_RX(0); + pfvf->nix_tx_intf = NIX_INTFX_TX(0); + break; + } + + return pfvf->nix_blkaddr; +} + +static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, + u16 pcifunc, struct rsrc_attach *attach) +{ + int blkaddr; + + switch (blktype) { + case BLKTYPE_NIX: + blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc); + break; + case BLKTYPE_CPT: + if (attach->hdr.ver < RVU_MULTI_BLK_VER) + return rvu_get_blkaddr(rvu, blktype, 0); + blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr : + BLKADDR_CPT0; + if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) + return -ENODEV; + break; + default: + return rvu_get_blkaddr(rvu, blktype, 0); + }; + + if (is_block_implemented(rvu->hw, blkaddr)) + return blkaddr; + + return -ENODEV; +} + +static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype, + int num_lfs, struct rsrc_attach *attach) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct rvu_hwinfo *hw = rvu->hw; @@ -1182,7 +1344,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc, if (!num_lfs) return; - blkaddr = rvu_get_blkaddr(rvu, blktype, 0); + blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach); if (blkaddr < 0) return; @@ -1211,12 +1373,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, struct rsrc_attach *req, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int free_lfs, mappedlfs, blkaddr; struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; - int free_lfs, mappedlfs; /* Only one NPA LF can be attached */ - if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) { + if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) { block = &hw->block[BLKADDR_NPA]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) @@ -1229,8 +1391,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, } /* Only one NIX LF can be attached */ - if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) { - block = &hw->block[BLKADDR_NIX0]; + if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) { + blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX, + pcifunc, req); + if (blkaddr < 0) + return blkaddr; + block = &hw->block[blkaddr]; free_lfs = rvu_rsrc_free_count(&block->lf); if (!free_lfs) goto fail; @@ -1250,7 +1416,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->sso, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); /* Check if additional resources are available */ if (req->sso > mappedlfs && @@ -1266,7 +1432,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->sso, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->ssow > mappedlfs && ((req->ssow - mappedlfs) > free_lfs)) @@ -1281,7 +1447,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, pcifunc, req->timlfs, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + 
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->timlfs > mappedlfs && ((req->timlfs - mappedlfs) > free_lfs)) @@ -1289,14 +1455,18 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, } if (req->cptlfs) { - block = &hw->block[BLKADDR_CPT0]; + blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT, + pcifunc, req); + if (blkaddr < 0) + return blkaddr; + block = &hw->block[blkaddr]; if (req->cptlfs > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid CPTLF req, %d > max %d\n", pcifunc, req->cptlfs, block->lf.max); return -EINVAL; } - mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type); + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); free_lfs = rvu_rsrc_free_count(&block->lf); if (req->cptlfs > mappedlfs && ((req->cptlfs - mappedlfs) > free_lfs)) @@ -1310,6 +1480,22 @@ fail: return -ENOSPC; } +static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype, + struct rsrc_attach *attach) +{ + int blkaddr, num_lfs; + + blkaddr = rvu_get_attach_blkaddr(rvu, blktype, + attach->hdr.pcifunc, attach); + if (blkaddr < 0) + return false; + + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc), + blkaddr); + /* Requester already has LFs from given block ? */ + return !!num_lfs; +} + int rvu_mbox_handler_attach_resources(struct rvu *rvu, struct rsrc_attach *attach, struct msg_rsp *rsp) @@ -1330,10 +1516,10 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu, /* Now attach the requested resources */ if (attach->npalf) - rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1); + rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach); if (attach->nixlf) - rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1); + rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach); if (attach->sso) { /* RVU func doesn't know which exact LF or slot is attached @@ -1343,25 +1529,30 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu, */ if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO); - rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, + attach->sso, attach); } if (attach->ssow) { if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW); - rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow); + rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, + attach->ssow, attach); } if (attach->timlfs) { if (attach->modify) rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM); - rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs); + rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, + attach->timlfs, attach); } if (attach->cptlfs) { - if (attach->modify) + if (attach->modify && + rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach)) rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT); - rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs); + rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, + attach->cptlfs, attach); } exit: @@ -1439,7 +1630,7 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int lf, slot; + int lf, slot, blkaddr; pfvf = rvu_get_pfvf(rvu, pcifunc); if (!pfvf->msix.bmap) @@ -1449,8 +1640,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0); rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf); - lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0); - rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf); + /* Get BLKADDR from which LFs 
are attached to pcifunc */ + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) { + rsp->nix_msixoff = MSIX_VECTOR_INVALID; + } else { + lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf); + } rsp->sso = pfvf->sso; for (slot = 0; slot < rsp->sso; slot++) { @@ -1479,6 +1676,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, rsp->cptlf_msixoff[slot] = rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf); } + + rsp->cpt1_lfs = pfvf->cpt1_lfs; + for (slot = 0; slot < rsp->cpt1_lfs; slot++) { + lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot); + rsp->cpt1_lf_msixoff[slot] = + rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf); + } + return 0; } @@ -1932,7 +2137,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), - block->type); + block->addr); if (!num_lfs) return; for (slot = 0; slot < num_lfs; slot++) { @@ -1941,7 +2146,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) continue; /* Cleanup LF and reset it */ - if (block->addr == BLKADDR_NIX0) + if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1) rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); else if (block->addr == BLKADDR_NPA) rvu_npa_lf_teardown(rvu, pcifunc, lf); @@ -1963,7 +2168,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) * 3. Cleanup pools (NPA) */ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); @@ -2445,7 +2652,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu) #define PCI_DEVID_OCTEONTX2_LBK 0xA061 -static int lbk_get_num_chans(void) +int rvu_get_num_lbk_chans(void) { struct pci_dev *pdev; void __iomem *base; @@ -2480,7 +2687,7 @@ static int rvu_enable_sriov(struct rvu *rvu) return 0; } - chans = lbk_get_num_chans(); + chans = rvu_get_num_lbk_chans(); if (chans < 0) return chans; @@ -2619,17 +2826,23 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_flr; + err = rvu_register_dl(rvu); + if (err) + goto err_irq; + rvu_setup_rvum_blk_revid(rvu); /* Enable AF's VFs (if any) */ err = rvu_enable_sriov(rvu); if (err) - goto err_irq; + goto err_dl; /* Initialize debugfs */ rvu_dbg_init(rvu); return 0; +err_dl: + rvu_unregister_dl(rvu); err_irq: rvu_unregister_interrupts(rvu); err_flr: @@ -2661,6 +2874,7 @@ static void rvu_remove(struct pci_dev *pdev) rvu_dbg_exit(rvu); rvu_unregister_interrupts(rvu); + rvu_unregister_dl(rvu); rvu_flr_wq_destroy(rvu); rvu_cgx_exit(rvu); rvu_fwdata_exit(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 90eed3160915..b1a6ecfd563e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -12,9 +12,13 @@ #define RVU_H #include <linux/pci.h> +#include <net/devlink.h> + #include "rvu_struct.h" +#include "rvu_devlink.h" #include "common.h" #include "mbox.h" +#include "npc.h" /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 @@ -28,6 +32,7 @@ #define PCI_MBOX_BAR_NUM 4 #define NAME_SIZE 32 +#define MAX_NIX_BLKS 2 /* PF_FUNC */ #define 
RVU_PFVF_PF_SHIFT 10 @@ -50,6 +55,7 @@ struct rvu_debugfs { struct dentry *npa; struct dentry *nix; struct dentry *npc; + struct dentry *cpt; struct dump_ctx npa_aura_ctx; struct dump_ctx npa_pool_ctx; struct dump_ctx nix_cq_ctx; @@ -104,6 +110,36 @@ struct nix_mce_list { int max; }; +/* layer metadata to uniquely identify a packet header field */ +struct npc_layer_mdata { + u8 lid; + u8 ltype; + u8 hdr; + u8 key; + u8 len; +}; + +/* Structure to represent a field present in the + * generated key. A key field may be present anywhere and can + * be of any size in the generated key. Once this structure + * is populated for the fields of interest, a field's presence + * and location (if present) can be determined. + */ +struct npc_key_field { + /* Masks where all set bits indicate position + * of a field in the key + */ + u64 kw_mask[NPC_MAX_KWS_IN_KEY]; + /* Number of words in the key a field spans. If a field is + * of 16 bytes and key offset is 4 then the field will use + * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2 and + * nr_kws will be 3 (KW0, KW1 and KW2). + */ + int nr_kws; + /* used by packet header fields */ + struct npc_layer_mdata layer_mdata; +}; + struct npc_mcam { struct rsrc_bmap counters; struct mutex lock; /* MCAM entries and counters update lock */ @@ -115,6 +151,7 @@ struct npc_mcam { u16 *entry2cntr_map; u16 *cntr2pfvf_map; u16 *cntr_refcnt; + u16 *entry2target_pffunc; u8 keysize; /* MCAM keysize 112/224/448 bits */ u8 banks; /* Number of MCAM banks */ u8 banks_per_entry;/* Number of keywords in key */ @@ -127,6 +164,12 @@ struct npc_mcam { u16 hprio_count; u16 hprio_end; u16 rx_miss_act_cntr; /* Counter for RX MISS action */ + /* fields present in the generated key */ + struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX]; + struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX]; + u64 tx_features; + u64 rx_features; + struct list_head mcam_rules; }; /* Structure for per RVU func info ie PF/VF */ @@ -137,6 +180,7 @@ struct rvu_pfvf { u16 ssow; u16 cptlfs; u16 timlfs; + u16 cpt1_lfs; u8 cgx_lmac; /* Block LF's MSIX vector info */ @@ -169,19 +213,22 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; + u8 pf_set_vf_cfg; u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ + u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ /* Broadcast pkt replication info */ u16 bcast_mce_idx; struct nix_mce_list bcast_mce_list; - /* VLAN offload */ - struct mcam_entry entry; - int rxvlan_index; - bool rxvlan; + struct rvu_npc_mcam_rule *def_ucast_rule; bool cgx_in_use; /* this PF/VF using CGX? 
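
The KW0/KW1/KW2 example in the npc_key_field comment is easier to see in code. A sketch, not part of the patch and assuming byte-granular masks over 8-byte keywords, of how kw_mask and nr_kws could be populated for a field of len bytes starting at key byte key_off:

        static void example_set_kw_masks(struct npc_key_field *field,
                                         u8 key_off, u8 len)
        {
                int start_kw = key_off / 8;
                int end_kw = (key_off + len - 1) / 8;
                int kw, b;

                field->nr_kws = end_kw - start_kw + 1;
                for (kw = start_kw; kw <= end_kw; kw++) {
                        for (b = 0; b < 8; b++) {
                                int byte = kw * 8 + b;

                                /* mark each byte of this KW the field covers */
                                if (byte >= key_off && byte < key_off + len)
                                        field->kw_mask[kw] |= 0xffULL << (b * 8);
                        }
                }
        }

With key_off = 4 and len = 16 this gives nr_kws = 3, covering 4 bytes of KW0, all of KW1 and 4 bytes of KW2, exactly as the comment describes.
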
*/ int cgx_users; /* number of cgx users - used only by PFs */ + + u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ + u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ + u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ }; struct nix_txsch { @@ -218,12 +265,22 @@ struct nix_lso { u8 in_use; }; +struct nix_txvlan { +#define NIX_TX_VTAG_DEF_MAX 0x400 + struct rsrc_bmap rsrc; + u16 *entry2pfvf_map; + struct mutex rsrc_lock; /* Serialize resource alloc/free */ +}; + struct nix_hw { + int blkaddr; + struct rvu *rvu; struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ struct nix_mcast mcast; struct nix_flowkey flowkey; struct nix_mark_format mark_format; struct nix_lso lso; + struct nix_txvlan txvlan; }; /* RVU block's capabilities or functionality, @@ -251,10 +308,16 @@ struct rvu_hwinfo { u8 lbk_links; u8 sdp_links; u8 npc_kpus; /* No of parser units */ + u8 npc_pkinds; /* No of port kinds */ + u8 npc_intfs; /* No of interfaces */ + u8 npc_kpu_entries; /* No of KPU entries */ + u16 npc_counters; /* No of match stats counters */ + bool npc_ext_set; /* Extended register set */ struct hw_cap cap; struct rvu_block block[BLK_COUNT]; /* Block info */ - struct nix_hw *nix0; + struct nix_hw *nix; + struct rvu *rvu; struct npc_pkind pkind; struct npc_mcam mcam; }; @@ -300,7 +363,7 @@ struct npc_kpu_profile_adapter { const struct npc_lt_def_cfg *lt_def; const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */ const struct npc_kpu_profile *kpu; /* array[kpus] */ - const struct npc_mcam_kex *mkex; + struct npc_mcam_kex *mkex; size_t pkinds; size_t kpus; }; @@ -315,6 +378,7 @@ struct rvu { struct rvu_pfvf *hwvf; struct mutex rsrc_lock; /* Serialize resource alloc/free */ int vfs; /* Number of VFs attached to RVU */ + int nix_blkaddr[MAX_NIX_BLKS]; /* Mbox */ struct mbox_wq_info afpf_wq_info; @@ -361,6 +425,7 @@ struct rvu { #ifdef CONFIG_DEBUG_FS struct rvu_debugfs rvu_dbg; #endif + struct rvu_devlink *rvu_dl; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -420,6 +485,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); +u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); @@ -429,6 +495,7 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); +int rvu_get_num_lbk_chans(void); /* RVU HW reg validation */ enum regmap_block { @@ -485,6 +552,9 @@ int rvu_get_nixlf_count(struct rvu *rvu); void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr); int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add); +struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr); +int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr); +void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -501,8 +571,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void 
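
rvu_get_next_nix_blkaddr() above suggests an iteration idiom over the up-to-MAX_NIX_BLKS NIX blocks. A sketch under the assumption (inferred from the name and from rvu->nix_blkaddr[], not confirmed by this hunk) that the helper returns the next implemented NIX block address after the one passed in, and a value of 0 or less once the list is exhausted:

        int blkaddr = rvu_get_next_nix_blkaddr(rvu, 0);

        while (blkaddr > 0) {
                /* per-NIX-block work, e.g. per-block debugfs or teardown */
                blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
        }
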
rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable); -int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, @@ -513,6 +583,24 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); +bool is_npc_intf_tx(u8 intf); +bool is_npc_intf_rx(u8 intf); +bool is_npc_interface_valid(struct rvu *rvu, u8 intf); +int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); +int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); +int npc_flow_steering_init(struct rvu *rvu, int blkaddr); +const char *npc_get_field_name(u8 hdr); +bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, + u16 pcifunc, u8 intf, struct mcam_entry *entry, + int *entry_index); +int npc_get_bank(struct npc_mcam *mcam, int index); +void npc_mcam_enable_flows(struct rvu *rvu, u16 target); +void npc_mcam_disable_flows(struct rvu *rvu, u16 target); +void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, bool enable); +void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 src, struct mcam_entry *entry, + u8 *intf, u8 *ena); #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index fa9152ff5e2a..d298b9357177 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -74,6 +74,20 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) return rvu->cgx_idmap[cgx_id]; } +/* Based on P2X connectivity find mapped NIX block for a PF */ +static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, + int cgx_id, int lmac_id) +{ + struct rvu_pfvf *pfvf = &rvu->pf[pf]; + u8 p2x; + + p2x = cgx_lmac_get_p2x(cgx_id, lmac_id); + /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */ + pfvf->nix_blkaddr = BLKADDR_NIX0; + if (p2x == CMR_P2X_SEL_NIX1) + pfvf->nix_blkaddr = BLKADDR_NIX1; +} + static int rvu_map_cgx_lmac_pf(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; @@ -117,6 +131,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; free_pkind = rvu_alloc_rsrc(&pkind->rsrc); pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; + rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c new file mode 100644 index 000000000000..35261d52c997 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Marvell. 
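
The new rvu_cpt.c below leans on the PF_FUNC encoding from rvu.h (RVU_PFVF_PF_SHIFT is 10, so the PF number sits above the FUNC bits). A short sketch, not part of the patch, of the decode that the is_cpt_pf()/is_cpt_vf() helpers rely on; FUNC == 0 denotes the PF itself and FUNC == n denotes VF n-1:

        static inline int example_pf(u16 pcifunc)
        {
                return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
        }

        static inline int example_vf(u16 pcifunc)
        {
                /* only meaningful when (pcifunc & RVU_PFVF_FUNC_MASK) != 0 */
                return (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
        }
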
*/ + +#include <linux/pci.h> +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "mbox.h" +#include "rvu.h" + +/* CPT PF device id */ +#define PCI_DEVID_OTX2_CPT_PF 0xA0FD + +static int get_cpt_pf_num(struct rvu *rvu) +{ + int i, domain_nr, cpt_pf_num = -1; + struct pci_dev *pdev; + + domain_nr = pci_domain_nr(rvu->pdev->bus); + for (i = 0; i < rvu->hw->total_pfs; i++) { + pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0); + if (!pdev) + continue; + + if (pdev->device == PCI_DEVID_OTX2_CPT_PF) { + cpt_pf_num = i; + put_device(&pdev->dev); + break; + } + put_device(&pdev->dev); + } + return cpt_pf_num; +} + +static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc) +{ + int cpt_pf_num = get_cpt_pf_num(rvu); + + if (rvu_get_pf(pcifunc) != cpt_pf_num) + return false; + if (pcifunc & RVU_PFVF_FUNC_MASK) + return false; + + return true; +} + +static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc) +{ + int cpt_pf_num = get_cpt_pf_num(rvu); + + if (rvu_get_pf(pcifunc) != cpt_pf_num) + return false; + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + return false; + + return true; +} + +int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, + struct cpt_lf_alloc_req_msg *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr; + int num_lfs, slot; + u64 val; + + if (req->eng_grpmsk == 0x0) + return CPT_AF_ERR_GRP_INVALID; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return blkaddr; + + block = &rvu->hw->block[blkaddr]; + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), + block->addr); + if (!num_lfs) + return CPT_AF_ERR_LF_INVALID; + + /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ + if (req->nix_pf_func) { + /* If default, use 'this' CPTLF's PFFUNC */ + if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC) + req->nix_pf_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX)) + return CPT_AF_ERR_NIX_PF_FUNC_INVALID; + } + + /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ + if (req->sso_pf_func) { + /* If default, use 'this' CPTLF's PFFUNC */ + if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC) + req->sso_pf_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO)) + return CPT_AF_ERR_SSO_PF_FUNC_INVALID; + } + + for (slot = 0; slot < num_lfs; slot++) { + cptlf = rvu_get_lf(rvu, block, pcifunc, slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + + /* Set CPT LF group and priority */ + val = (u64)req->eng_grpmsk << 48 | 1; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */ + val = (u64)req->nix_pf_func << 48 | + (u64)req->sso_pf_func << 32; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + } + + return 0; +} + +int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr; + int num_lfs, slot; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return blkaddr; + + block = &rvu->hw->block[blkaddr]; + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), + block->addr); + if (!num_lfs) + return CPT_AF_ERR_LF_INVALID; + + for (slot = 0; slot < num_lfs; slot++) { + cptlf = rvu_get_lf(rvu, block, pcifunc, slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + + /* Reset CPT LF group and priority */ + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0); + /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */ + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 
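
The register packing used by rvu_mbox_handler_cpt_lf_alloc() above, restated as helpers; the field positions are inferred from the shifts in the handler itself (engine group mask in bits 63:48 of CPT_AF_LFX_CTL with the priority bit at 0, NIX and SSO PF_FUNCs in bits 63:48 and 47:32 of CPT_AF_LFX_CTL2):

        static inline u64 example_cpt_lf_ctl(u64 eng_grpmsk)
        {
                return eng_grpmsk << 48 | 1;    /* grpmsk[63:48], prio = 1 */
        }

        static inline u64 example_cpt_lf_ctl2(u64 nix_pf_func, u64 sso_pf_func)
        {
                return nix_pf_func << 48 | sso_pf_func << 32;
        }
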
0x0); + } + + return 0; +} + +static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) +{ + u64 offset = req->reg_offset; + int blkaddr, num_lfs, lf; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + + /* Registers that can be accessed from PF/VF */ + if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) || + (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) { + if (offset & 7) + return false; + + lf = (offset & 0xFFF) >> 3; + block = &rvu->hw->block[blkaddr]; + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr); + if (lf >= num_lfs) + /* Slot is not valid for that PF/VF */ + return false; + + /* Translate local LF used by VFs to global CPT LF */ + lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], + req->hdr.pcifunc, lf); + if (lf < 0) + return false; + + return true; + } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { + /* Registers that can be accessed from PF */ + switch (offset) { + case CPT_AF_CTL: + case CPT_AF_PF_FUNC: + case CPT_AF_BLK_RST: + case CPT_AF_CONSTANTS1: + return true; + } + + switch (offset & 0xFF000) { + case CPT_AF_EXEX_STS(0): + case CPT_AF_EXEX_CTL(0): + case CPT_AF_EXEX_CTL2(0): + case CPT_AF_EXEX_UCODE_BASE(0): + if (offset & 7) + return false; + break; + default: + return false; + } + return true; + } + return false; +} + +int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, + struct cpt_rd_wr_reg_msg *req, + struct cpt_rd_wr_reg_msg *rsp) +{ + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return blkaddr; + + /* This message is accepted only if sent from CPT PF/VF */ + if (!is_cpt_pf(rvu, req->hdr.pcifunc) && + !is_cpt_vf(rvu, req->hdr.pcifunc)) + return CPT_AF_ERR_ACCESS_DENIED; + + rsp->reg_offset = req->reg_offset; + rsp->ret_val = req->ret_val; + rsp->is_write = req->is_write; + + if (!is_valid_offset(rvu, req)) + return CPT_AF_ERR_ACCESS_DENIED; + + if (req->is_write) + rvu_write64(rvu, blkaddr, req->reg_offset, req->val); + else + rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 77adad4adb1b..d27543c1a166 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -109,6 +109,12 @@ static char *cgx_tx_stats_fields[] = { [CGX_STAT17] = "Control/PAUSE packets sent", }; +enum cpt_eng_type { + CPT_AE_TYPE = 1, + CPT_SE_TYPE = 2, + CPT_IE_TYPE = 3, +}; + #define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \ blk_addr, NDC_AF_CONST) & 0xFF) @@ -224,18 +230,53 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); -static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf, +static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) +{ + struct rvu *rvu = filp->private; + struct pci_dev *pdev = NULL; + char cgx[10], lmac[10]; + struct rvu_pfvf *pfvf; + int pf, domain, blkid; + u8 cgx_id, lmac_id; + u16 pcifunc; + + domain = 2; + seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n"); + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + continue; + + cgx[0] = 0; + lmac[0] = 0; + pcifunc = pf << 10; + pfvf = rvu_get_pfvf(rvu, pcifunc); + + if (pfvf->nix_blkaddr == BLKADDR_NIX0) + 
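
is_valid_offset() above decodes the LF slot straight out of the mailbox-supplied register offset: CPT_AF_LFX_CTL()/CPT_AF_LFX_CTL2() entries are 8 bytes apart, bits 19:12 select the register group and the low 12 bits index the slot. The same decode as a standalone sketch (not part of the patch):

        static inline int example_lf_slot(u64 offset)
        {
                if (offset & 7)
                        return -1;              /* must be 8-byte aligned */
                return (offset & 0xFFF) >> 3;   /* local LF slot in the group */
        }
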
blkid = 0; + else + blkid = 1; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, + &lmac_id); + sprintf(cgx, "CGX%d", cgx_id); + sprintf(lmac, "LMAC%d", lmac_id); + seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", + dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); + } + return 0; +} + +RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL); + +static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf, u16 *pcifunc) { struct rvu_block *block; struct rvu_hwinfo *hw; - int blkaddr; - - blkaddr = rvu_get_blkaddr(rvu, blktype, 0); - if (blkaddr < 0) { - dev_warn(rvu->dev, "Invalid blktype\n"); - return false; - } hw = rvu->hw; block = &hw->block[blkaddr]; @@ -291,10 +332,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, { void (*print_qsize)(struct seq_file *filp, struct rvu_pfvf *pfvf) = NULL; + struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; + int blkaddr; rvu = filp->private; switch (blktype) { @@ -312,7 +355,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, return -EINVAL; } - if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc)) + if (blktype == BLKTYPE_NPA) { + blkaddr = BLKADDR_NPA; + } else { + current_dir = filp->file->f_path.dentry->d_parent; + blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? + BLKADDR_NIX1 : BLKADDR_NIX0); + } + + if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -329,6 +380,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, struct seq_file *seqfile = filp->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; + struct dentry *current_dir; + int blkaddr; u16 pcifunc; int ret, lf; @@ -355,7 +408,15 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, goto qsize_write_done; } - if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) { + if (blktype == BLKTYPE_NPA) { + blkaddr = BLKADDR_NPA; + } else { + current_dir = filp->f_path.dentry->d_parent; + blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 
+ BLKADDR_NIX1 : BLKADDR_NIX0); + } + + if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; goto qsize_write_done; } @@ -498,7 +559,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype) return -EINVAL; } - if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -556,7 +617,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all, int max_id = 0; u16 pcifunc; - if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc)) + if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -704,9 +765,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr, int ctype, int transaction) { u64 req, out_req, lat, cant_alloc; - struct rvu *rvu = s->private; + struct nix_hw *nix_hw; + struct rvu *rvu; int port; + if (blk_addr == BLKADDR_NDC_NPA0) { + rvu = s->private; + } else { + nix_hw = s->private; + rvu = nix_hw->rvu; + } + for (port = 0; port < NDC_MAX_PORT; port++) { req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC (port, ctype, transaction)); @@ -749,9 +818,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL); static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) { - struct rvu *rvu = s->private; + struct nix_hw *nix_hw; + struct rvu *rvu; int bank, max_bank; + if (blk_addr == BLKADDR_NDC_NPA0) { + rvu = s->private; + } else { + nix_hw = s->private; + rvu = nix_hw->rvu; + } + max_bank = NDC_MAX_BANK(rvu, blk_addr); for (bank = 0; bank < max_bank; bank++) { seq_printf(s, "BANK:%d\n", bank); @@ -767,16 +844,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused) { - return ndc_blk_cache_stats(filp, NIX0_RX, - BLKADDR_NDC_NIX0_RX); + struct nix_hw *nix_hw = filp->private; + int blkaddr = 0; + int ndc_idx = 0; + + blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? + BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); + ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX); + + return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL); static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused) { - return ndc_blk_cache_stats(filp, NIX0_TX, - BLKADDR_NDC_NIX0_TX); + struct nix_hw *nix_hw = filp->private; + int blkaddr = 0; + int ndc_idx = 0; + + blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? + BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); + ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX); + + return ndc_blk_cache_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL); @@ -792,8 +883,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL); static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp, void *unused) { - return ndc_blk_hits_miss_stats(filp, - NPA0_U, BLKADDR_NDC_NIX0_RX); + struct nix_hw *nix_hw = filp->private; + int ndc_idx = NPA0_U; + int blkaddr = 0; + + blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? 
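
Both qsize handlers above recover the owning NIX block from the debugfs hierarchy rather than from stored state: the same "qsize" file is created under both the "nix" and "nix1" directories, and the parent dentry's name disambiguates. The shared decision, factored out as a sketch (example_qsize_blkaddr() is hypothetical):

        static int example_qsize_blkaddr(struct dentry *parent)
        {
                return !strcmp(parent->d_name.name, "nix1") ? BLKADDR_NIX1
                                                            : BLKADDR_NIX0;
        }
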
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX); + + return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); @@ -801,8 +898,14 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL); static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, void *unused) { - return ndc_blk_hits_miss_stats(filp, - NPA0_U, BLKADDR_NDC_NIX0_TX); + struct nix_hw *nix_hw = filp->private; + int ndc_idx = NPA0_U; + int blkaddr = 0; + + blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ? + BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX); + + return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr); } RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); @@ -969,7 +1072,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, { void (*print_nix_ctx)(struct seq_file *filp, struct nix_aq_enq_rsp *rsp) = NULL; - struct rvu *rvu = filp->private; + struct nix_hw *nix_hw = filp->private; + struct rvu *rvu = nix_hw->rvu; struct nix_aq_enq_req aq_req; struct nix_aq_enq_rsp rsp; char *ctype_string = NULL; @@ -1001,7 +1105,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, return -EINVAL; } - if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -1053,13 +1157,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp, } static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf, - int id, int ctype, char *ctype_string) + int id, int ctype, char *ctype_string, + struct seq_file *m) { + struct nix_hw *nix_hw = m->private; struct rvu_pfvf *pfvf; int max_id = 0; u16 pcifunc; - if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc)) + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -1119,7 +1225,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, int ctype) { struct seq_file *m = filp->private_data; - struct rvu *rvu = m->private; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; char *cmd_buf, *ctype_string; int nixlf, id = 0, ret; bool all = false; @@ -1155,7 +1262,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp, goto done; } else { ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype, - ctype_string); + ctype_string, m); } done: kfree(cmd_buf); @@ -1259,102 +1366,54 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused) RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write); -static void rvu_dbg_nix_init(struct rvu *rvu) +static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) { - const struct device *dev = &rvu->pdev->dev; - struct dentry *pfile; + struct nix_hw *nix_hw; - rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); - if (!rvu->rvu_dbg.nix) { - dev_err(rvu->dev, "create debugfs dir failed for nix\n"); + if (!is_block_implemented(rvu->hw, blkaddr)) return; - } - - pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_sq_ctx_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_rq_ctx_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_cq_ctx_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu, - 
&rvu_dbg_nix_ndc_tx_cache_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_ndc_rx_cache_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, - rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, - rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_qsize_fops); - if (!pfile) - goto create_failed; + if (blkaddr == BLKADDR_NIX0) { + rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root); + nix_hw = &rvu->hw->nix[0]; + } else { + rvu->rvu_dbg.nix = debugfs_create_dir("nix1", + rvu->rvu_dbg.root); + nix_hw = &rvu->hw->nix[1]; + } - return; -create_failed: - dev_err(dev, "Failed to create debugfs dir/file for NIX\n"); - debugfs_remove_recursive(rvu->rvu_dbg.nix); + debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_sq_ctx_fops); + debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_rq_ctx_fops); + debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_cq_ctx_fops); + debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_ndc_tx_cache_fops); + debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_ndc_rx_cache_fops); + debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_ndc_tx_hits_miss_fops); + debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_ndc_rx_hits_miss_fops); + debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, + &rvu_dbg_nix_qsize_fops); } static void rvu_dbg_npa_init(struct rvu *rvu) { - const struct device *dev = &rvu->pdev->dev; - struct dentry *pfile; - rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root); - if (!rvu->rvu_dbg.npa) - return; - - pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, - &rvu_dbg_npa_qsize_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, - &rvu_dbg_npa_aura_ctx_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, - &rvu_dbg_npa_pool_ctx_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, - &rvu_dbg_npa_ndc_cache_fops); - if (!pfile) - goto create_failed; - - pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, - rvu, &rvu_dbg_npa_ndc_hits_miss_fops); - if (!pfile) - goto create_failed; - - return; -create_failed: - dev_err(dev, "Failed to create debugfs dir/file for NPA\n"); - debugfs_remove_recursive(rvu->rvu_dbg.npa); + debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_qsize_fops); + debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_aura_ctx_fops); + debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_pool_ctx_fops); + debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_ndc_cache_fops); + debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu, + &rvu_dbg_npa_ndc_hits_miss_fops); } #define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \ @@ -1488,8 +1547,6 @@ RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); static void rvu_dbg_cgx_init(struct rvu 
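
The deletions through this region all follow one pattern: the pfile checks and create_failed unwinding are dropped because, by kernel convention, the debugfs creation helpers may be called without checking their result; on failure they return error cookies that subsequent debugfs calls tolerate. The replacement idiom is simply a bare call (names below are placeholders):

        debugfs_create_file("qsize", 0600, parent, priv, &fops);
        /* no error check: debugfs copes with failed creation internally */
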
*rvu) { - const struct device *dev = &rvu->pdev->dev; - struct dentry *pfile; int i, lmac_id; char dname[20]; void *cgx; @@ -1510,18 +1567,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); - pfile = debugfs_create_file("stats", 0600, - rvu->rvu_dbg.lmac, cgx, - &rvu_dbg_cgx_stat_fops); - if (!pfile) - goto create_failed; + debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, + cgx, &rvu_dbg_cgx_stat_fops); } } - return; - -create_failed: - dev_err(dev, "Failed to create debugfs dir/file for CGX\n"); - debugfs_remove_recursive(rvu->rvu_dbg.cgx_root); } /* NPC debugfs APIs */ @@ -1565,7 +1614,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) struct rvu *rvu = filp->private; int pf, vf, numvfs, blkaddr; struct npc_mcam *mcam; - u16 pcifunc; + u16 pcifunc, counters; u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -1573,6 +1622,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) return -ENODEV; mcam = &rvu->hw->mcam; + counters = rvu->hw->npc_counters; seq_puts(filp, "\nNPC MCAM info:\n"); /* MCAM keywidth on receive and transmit sides */ @@ -1595,10 +1645,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued) seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt); /* MCAM counters */ - cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); - cfg = (cfg >> 48) & 0xFFFF; - seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg); - seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max); + seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters); + seq_printf(filp, "\t\t Reserved \t: %d\n", + counters - mcam->counters.max); seq_printf(filp, "\t\t Available \t: %d\n", rvu_rsrc_free_count(&mcam->counters)); @@ -1650,57 +1699,453 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp, RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL); -static void rvu_dbg_npc_init(struct rvu *rvu) +static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, + struct rvu_npc_mcam_rule *rule) { - const struct device *dev = &rvu->pdev->dev; - struct dentry *pfile; + u8 bit; + + for_each_set_bit(bit, (unsigned long *)&rule->features, 64) { + seq_printf(s, "\t%s ", npc_get_field_name(bit)); + switch (bit) { + case NPC_DMAC: + seq_printf(s, "%pM ", rule->packet.dmac); + seq_printf(s, "mask %pM\n", rule->mask.dmac); + break; + case NPC_SMAC: + seq_printf(s, "%pM ", rule->packet.smac); + seq_printf(s, "mask %pM\n", rule->mask.smac); + break; + case NPC_ETYPE: + seq_printf(s, "0x%x ", ntohs(rule->packet.etype)); + seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype)); + break; + case NPC_OUTER_VID: + seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci)); + seq_printf(s, "mask 0x%x\n", + ntohs(rule->mask.vlan_tci)); + break; + case NPC_TOS: + seq_printf(s, "%d ", rule->packet.tos); + seq_printf(s, "mask 0x%x\n", rule->mask.tos); + break; + case NPC_SIP_IPV4: + seq_printf(s, "%pI4 ", &rule->packet.ip4src); + seq_printf(s, "mask %pI4\n", &rule->mask.ip4src); + break; + case NPC_DIP_IPV4: + seq_printf(s, "%pI4 ", &rule->packet.ip4dst); + seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst); + break; + case NPC_SIP_IPV6: + seq_printf(s, "%pI6 ", rule->packet.ip6src); + seq_printf(s, "mask %pI6\n", rule->mask.ip6src); + break; + case NPC_DIP_IPV6: + seq_printf(s, "%pI6 ", rule->packet.ip6dst); + seq_printf(s, "mask %pI6\n", rule->mask.ip6dst); + break; + case NPC_SPORT_TCP: + case NPC_SPORT_UDP: + case NPC_SPORT_SCTP: + 
seq_printf(s, "%d ", ntohs(rule->packet.sport)); + seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport)); + break; + case NPC_DPORT_TCP: + case NPC_DPORT_UDP: + case NPC_DPORT_SCTP: + seq_printf(s, "%d ", ntohs(rule->packet.dport)); + seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); + break; + default: + break; + } + } +} + +static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, + struct rvu_npc_mcam_rule *rule) +{ + if (rule->intf == NIX_INTF_TX) { + switch (rule->tx_action.op) { + case NIX_TX_ACTIONOP_DROP: + seq_puts(s, "\taction: Drop\n"); + break; + case NIX_TX_ACTIONOP_UCAST_DEFAULT: + seq_puts(s, "\taction: Unicast to default channel\n"); + break; + case NIX_TX_ACTIONOP_UCAST_CHAN: + seq_printf(s, "\taction: Unicast to channel %d\n", + rule->tx_action.index); + break; + case NIX_TX_ACTIONOP_MCAST: + seq_puts(s, "\taction: Multicast\n"); + break; + case NIX_TX_ACTIONOP_DROP_VIOL: + seq_puts(s, "\taction: Lockdown Violation Drop\n"); + break; + default: + break; + }; + } else { + switch (rule->rx_action.op) { + case NIX_RX_ACTIONOP_DROP: + seq_puts(s, "\taction: Drop\n"); + break; + case NIX_RX_ACTIONOP_UCAST: + seq_printf(s, "\taction: Direct to queue %d\n", + rule->rx_action.index); + break; + case NIX_RX_ACTIONOP_RSS: + seq_puts(s, "\taction: RSS\n"); + break; + case NIX_RX_ACTIONOP_UCAST_IPSEC: + seq_puts(s, "\taction: Unicast ipsec\n"); + break; + case NIX_RX_ACTIONOP_MCAST: + seq_puts(s, "\taction: Multicast\n"); + break; + default: + break; + }; + } +} + +static const char *rvu_dbg_get_intf_name(int intf) +{ + switch (intf) { + case NIX_INTFX_RX(0): + return "NIX0_RX"; + case NIX_INTFX_RX(1): + return "NIX1_RX"; + case NIX_INTFX_TX(0): + return "NIX0_TX"; + case NIX_INTFX_TX(1): + return "NIX1_TX"; + default: + break; + } + + return "unknown"; +} + +static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused) +{ + struct rvu_npc_mcam_rule *iter; + struct rvu *rvu = s->private; + struct npc_mcam *mcam; + int pf, vf = -1; + int blkaddr; + u16 target; + u64 hits; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return 0; + + mcam = &rvu->hw->mcam; + + mutex_lock(&mcam->lock); + list_for_each_entry(iter, &mcam->mcam_rules, list) { + pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + seq_printf(s, "\n\tInstalled by: PF%d ", pf); + + if (iter->owner & RVU_PFVF_FUNC_MASK) { + vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1; + seq_printf(s, "VF%d", vf); + } + seq_puts(s, "\n"); + + seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ? + "RX" : "TX"); + seq_printf(s, "\tinterface: %s\n", + rvu_dbg_get_intf_name(iter->intf)); + seq_printf(s, "\tmcam entry: %d\n", iter->entry); + + rvu_dbg_npc_mcam_show_flows(s, iter); + if (iter->intf == NIX_INTF_RX) { + target = iter->rx_action.pf_func; + pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; + seq_printf(s, "\tForward to: PF%d ", pf); + + if (target & RVU_PFVF_FUNC_MASK) { + vf = (target & RVU_PFVF_FUNC_MASK) - 1; + seq_printf(s, "VF%d", vf); + } + seq_puts(s, "\n"); + } + + rvu_dbg_npc_mcam_show_action(s, iter); + seq_printf(s, "\tenabled: %s\n", iter->enable ? 
"yes" : "no"); + + if (!iter->has_cntr) + continue; + seq_printf(s, "\tcounter: %d\n", iter->cntr); + hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr)); + seq_printf(s, "\thits: %lld\n", hits); + } + mutex_unlock(&mcam->lock); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL); + +static void rvu_dbg_npc_init(struct rvu *rvu) +{ rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root); - if (!rvu->rvu_dbg.npc) - return; - pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, - rvu, &rvu_dbg_npc_mcam_info_fops); - if (!pfile) - goto create_failed; + debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu, + &rvu_dbg_npc_mcam_info_fops); + debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu, + &rvu_dbg_npc_mcam_rules_fops); + debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu, + &rvu_dbg_npc_rx_miss_act_fops); +} - pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, - rvu, &rvu_dbg_npc_rx_miss_act_fops); - if (!pfile) - goto create_failed; +/* CPT debugfs APIs */ +static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) +{ + struct rvu *rvu = filp->private; + u64 busy_sts = 0, free_sts = 0; + u32 e_min = 0, e_max = 0, e, i; + u16 max_ses, max_ies, max_aes; + int blkaddr; + u64 reg; - return; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return -ENODEV; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); + max_ses = reg & 0xffff; + max_ies = (reg >> 16) & 0xffff; + max_aes = (reg >> 32) & 0xffff; + + switch (eng_type) { + case CPT_AE_TYPE: + e_min = max_ses + max_ies; + e_max = max_ses + max_ies + max_aes; + break; + case CPT_SE_TYPE: + e_min = 0; + e_max = max_ses; + break; + case CPT_IE_TYPE: + e_min = max_ses; + e_max = max_ses + max_ies; + break; + default: + return -EINVAL; + } + + for (e = e_min, i = 0; e < e_max; e++, i++) { + reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e)); + if (reg & 0x1) + busy_sts |= 1ULL << i; + + if (reg & 0x2) + free_sts |= 1ULL << i; + } + seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts); + seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts); -create_failed: - dev_err(dev, "Failed to create debugfs dir/file for NPC\n"); - debugfs_remove_recursive(rvu->rvu_dbg.npc); + return 0; } -void rvu_dbg_init(struct rvu *rvu) +static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused) { - struct device *dev = &rvu->pdev->dev; - struct dentry *pfile; + return cpt_eng_sts_display(filp, CPT_AE_TYPE); +} - rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); - if (!rvu->rvu_dbg.root) { - dev_err(rvu->dev, "%s failed\n", __func__); - return; +RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL); + +static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused) +{ + return cpt_eng_sts_display(filp, CPT_SE_TYPE); +} + +RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL); + +static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused) +{ + return cpt_eng_sts_display(filp, CPT_IE_TYPE); +} + +RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL); + +static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused) +{ + struct rvu *rvu = filp->private; + u16 max_ses, max_ies, max_aes; + u32 e_max, e; + int blkaddr; + u64 reg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return -ENODEV; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); + max_ses = reg & 0xffff; + max_ies = (reg >> 16) & 0xffff; + max_aes = (reg 
>> 32) & 0xffff; + + e_max = max_ses + max_ies + max_aes; + + seq_puts(filp, "===========================================\n"); + for (e = 0; e < e_max; e++) { + reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e)); + seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e, + reg & 0xff); + reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e)); + seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e, + reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e)); + seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e, + reg); + seq_puts(filp, "===========================================\n"); + } + return 0; +} + +RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL); + +static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused) +{ + struct rvu *rvu = filp->private; + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr; + u64 reg; + u32 lf; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return -ENODEV; + + block = &hw->block[blkaddr]; + if (!block->lf.bmap) + return -ENODEV; + + seq_puts(filp, "===========================================\n"); + for (lf = 0; lf < block->lf.max; lf++) { + reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf)); + seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf)); + seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf)); + seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg); + reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg | + (lf << block->lfshift)); + seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg); + seq_puts(filp, "===========================================\n"); } - pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, - &rvu_dbg_rsrc_status_fops); - if (!pfile) - goto create_failed; + return 0; +} + +RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL); + +static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused) +{ + struct rvu *rvu = filp->private; + u64 reg0, reg1; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return -ENODEV; + + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); + reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); + seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1); + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0)); + reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1)); + seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1); + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0)); + seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0); + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT); + seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0); + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT); + seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0); + reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO); + seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL); + +static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) +{ + struct rvu *rvu; + int blkaddr; + u64 reg; + + rvu = filp->private; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); + if (blkaddr < 0) + return -ENODEV; + + reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); + seq_printf(filp, "CPT instruction requests %llu\n", reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); + seq_printf(filp, "CPT instruction latency %llu\n", reg); + reg = 
rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC); + seq_printf(filp, "CPT NCB read requests %llu\n", reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC); + seq_printf(filp, "CPT NCB read latency %llu\n", reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC); + seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC); + seq_printf(filp, "CPT active cycles pc %llu\n", reg); + reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT); + seq_printf(filp, "CPT clock count pc %llu\n", reg); + + return 0; +} + +RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL); + +static void rvu_dbg_cpt_init(struct rvu *rvu) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return; + + rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); + + debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_pc_fops); + debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_ae_sts_fops); + debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_se_sts_fops); + debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_ie_sts_fops); + debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_engines_info_fops); + debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_lfs_info_fops); + debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu, + &rvu_dbg_cpt_err_info_fops); +} + +void rvu_dbg_init(struct rvu *rvu) +{ + rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + + debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rsrc_status_fops); + debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu, + &rvu_dbg_rvu_pf_cgx_map_fops); rvu_dbg_npa_init(rvu); - rvu_dbg_nix_init(rvu); + rvu_dbg_nix_init(rvu, BLKADDR_NIX0); + + rvu_dbg_nix_init(rvu, BLKADDR_NIX1); rvu_dbg_cgx_init(rvu); rvu_dbg_npc_init(rvu); - - return; - -create_failed: - dev_err(dev, "Failed to create debugfs dir\n"); - debugfs_remove_recursive(rvu->rvu_dbg.root); + rvu_dbg_cpt_init(rvu); } void rvu_dbg_exit(struct rvu *rvu) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c new file mode 100644 index 000000000000..3f9d0ab6d5ae --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Devlink + * + * Copyright (C) 2020 Marvell. 
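
Every health reporter in the new rvu_devlink.c below brackets its output with the rvu_report_pair_start()/rvu_report_pair_end() helpers, which nest a devlink fmsg object inside a named pair. A sketch of the intended usage, built only from calls the file itself makes (example_report_reg() is hypothetical):

        static int example_report_reg(struct devlink_fmsg *fmsg, u64 val)
        {
                int err;

                err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
                if (err)
                        return err;
                err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
                                                val);
                if (err)
                        return err;
                return rvu_report_pair_end(fmsg);
        }
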
+ * + */ + +#include<linux/bitfield.h> + +#include "rvu.h" +#include "rvu_reg.h" +#include "rvu_struct.h" + +#define DRV_NAME "octeontx2-af" + +static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name) +{ + int err; + + err = devlink_fmsg_pair_nest_start(fmsg, name); + if (err) + return err; + + return devlink_fmsg_obj_nest_start(fmsg); +} + +static int rvu_report_pair_end(struct devlink_fmsg *fmsg) +{ + int err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return devlink_fmsg_pair_nest_end(fmsg); +} + +static bool rvu_common_request_irq(struct rvu *rvu, int offset, + const char *name, irq_handler_t fn) +{ + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + int rc; + + sprintf(&rvu->irq_name[offset * NAME_SIZE], name); + rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu_dl); + if (rc) + dev_warn(rvu->dev, "Failed to register %s irq\n", name); + else + rvu->irq_allocated[offset] = true; + + return rvu->irq_allocated[offset]; +} + +static void rvu_npa_intr_work(struct work_struct *work) +{ + struct rvu_npa_health_reporters *rvu_npa_health_reporter; + + rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work); + devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter, + "NPA_AF_RVU Error", + rvu_npa_health_reporter->npa_event_ctx); +} + +static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_npa_event_ctx *npa_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return IRQ_NONE; + + npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT); + npa_event_context->npa_af_rvu_int = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr); + rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work); + + return IRQ_HANDLED; +} + +static void rvu_npa_gen_work(struct work_struct *work) +{ + struct rvu_npa_health_reporters *rvu_npa_health_reporter; + + rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work); + devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter, + "NPA_AF_GEN Error", + rvu_npa_health_reporter->npa_event_ctx); +} + +static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_npa_event_ctx *npa_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return IRQ_NONE; + + npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT); + npa_event_context->npa_af_rvu_gen = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr); + rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work); + + return IRQ_HANDLED; +} + +static void rvu_npa_err_work(struct work_struct *work) +{ + struct rvu_npa_health_reporters *rvu_npa_health_reporter; + + rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work); + devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter, + "NPA_AF_ERR Error", + 
rvu_npa_health_reporter->npa_event_ctx); +} + +static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_npa_event_ctx *npa_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return IRQ_NONE; + npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT); + npa_event_context->npa_af_rvu_err = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr); + rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work); + + return IRQ_HANDLED; +} + +static void rvu_npa_ras_work(struct work_struct *work) +{ + struct rvu_npa_health_reporters *rvu_npa_health_reporter; + + rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work); + devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter, + "HW NPA_AF_RAS Error reported", + rvu_npa_health_reporter->npa_event_ctx); +} + +static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_npa_event_ctx *npa_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return IRQ_NONE; + + npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS); + npa_event_context->npa_af_rvu_ras = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr); + rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work); + + return IRQ_HANDLED; +} + +static void rvu_npa_unregister_interrupts(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + int i, offs, blkaddr; + u64 reg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return; + + reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG); + offs = reg & 0x3FF; + + rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL); + + for (i = 0; i < NPA_AF_INT_VEC_CNT; i++) + if (rvu->irq_allocated[offs + i]) { + free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); + rvu->irq_allocated[offs + i] = false; + } +} + +static int rvu_npa_register_interrupts(struct rvu *rvu) +{ + int blkaddr, base; + bool rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return blkaddr; + + /* Get NPA AF MSIX vectors offset. 
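
One nit worth flagging in rvu_common_request_irq() above: the caller-supplied name is passed to sprintf() as the format string, which trips -Wformat-security and would misparse a name containing '%'. None of the names used here do, but a safer, bounded form (an editor's suggestion, not part of the patch; NAME_SIZE is the irq_name stride from rvu.h) would be:

        snprintf(&rvu->irq_name[offset * NAME_SIZE], NAME_SIZE, "%s", name);
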
*/ + base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff; + if (!base) { + dev_warn(rvu->dev, + "Failed to get NPA_AF_INT vector offsets\n"); + return 0; + } + + /* Register and enable NPA_AF_RVU_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU, + "NPA_AF_RVU_INT", + rvu_npa_af_rvu_intr_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL); + + /* Register and enable NPA_AF_GEN_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN, + "NPA_AF_RVU_GEN", + rvu_npa_af_gen_intr_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL); + + /* Register and enable NPA_AF_ERR_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR, + "NPA_AF_ERR_INT", + rvu_npa_af_err_intr_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL); + + /* Register and enable NPA_AF_RAS interrupt */ + rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON, + "NPA_AF_RAS", + rvu_npa_af_ras_intr_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL); + + return 0; +err: + rvu_npa_unregister_interrupts(rvu); + return rc; +} + +static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx, + enum npa_af_rvu_health health_reporter) +{ + struct rvu_npa_event_ctx *npa_event_context; + unsigned int intr_val, alloc_dis, free_dis; + int err; + + npa_event_context = ctx; + switch (health_reporter) { + case NPA_AF_RVU_GEN: + intr_val = npa_event_context->npa_af_rvu_gen; + err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ", + npa_event_context->npa_af_rvu_gen); + if (err) + return err; + if (intr_val & BIT_ULL(32)) { + err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error"); + if (err) + return err; + } + + free_dis = FIELD_GET(GENMASK(15, 0), intr_val); + if (free_dis & BIT(NPA_INPQ_NIX0_RX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_NIX0_TX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_NIX1_RX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_NIX1_TX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_SSO)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_TIM)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_DPI)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI"); + if (err) + return err; + } + if (free_dis & BIT(NPA_INPQ_AURA_OP)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA"); + if (err) + return err; + } + + alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val); + if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX"); + if (err) + return 
err; + } + if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_SSO)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_TIM)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_DPI)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI"); + if (err) + return err; + } + if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NPA_AF_RVU_ERR: + err = rvu_report_pair_start(fmsg, "NPA_AF_ERR"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ", + npa_event_context->npa_af_rvu_err); + if (err) + return err; + + if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read"); + if (err) + return err; + } + if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write"); + if (err) + return err; + } + if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NPA_AF_RVU_RAS: + err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ", + npa_event_context->npa_af_rvu_ras); + if (err) + return err; + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) { + err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S"); + if (err) + return err; + } + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) { + err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S"); + if (err) + return err; + } + if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) { + err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NPA_AF_RVU_INTR: + err = rvu_report_pair_start(fmsg, "NPA_AF_RVU"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ", + npa_event_context->npa_af_rvu_int); + if (err) + return err; + if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) { + err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + if (err) + return err; + } + return rvu_report_pair_end(fmsg); + default: + return -EINVAL; + } + + return 0; +} + +static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_npa_event_ctx *npa_ctx; + + npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + + return ctx ? 
rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) : + rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR); +} + +static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_npa_event_ctx *npa_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return blkaddr; + + if (npa_event_ctx->npa_af_rvu_int) + rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_npa_event_ctx *npa_ctx; + + npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + + return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) : + rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN); +} + +static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_npa_event_ctx *npa_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return blkaddr; + + if (npa_event_ctx->npa_af_rvu_gen) + rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_npa_event_ctx *npa_ctx; + + npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + + return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) : + rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR); +} + +static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_npa_event_ctx *npa_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return blkaddr; + + if (npa_event_ctx->npa_af_rvu_err) + rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_npa_event_ctx *npa_ctx; + + npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx; + + return ctx ? 
rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) : + rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS); +} + +static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_npa_event_ctx *npa_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); + if (blkaddr < 0) + return blkaddr; + + if (npa_event_ctx->npa_af_rvu_ras) + rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL); + + return 0; +} + +RVU_REPORTERS(hw_npa_intr); +RVU_REPORTERS(hw_npa_gen); +RVU_REPORTERS(hw_npa_err); +RVU_REPORTERS(hw_npa_ras); + +static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl); + +static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl) +{ + struct rvu_npa_health_reporters *rvu_reporters; + struct rvu_npa_event_ctx *npa_event_context; + struct rvu *rvu = rvu_dl->rvu; + + rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL); + if (!rvu_reporters) + return -ENOMEM; + + rvu_dl->rvu_npa_health_reporter = rvu_reporters; + npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL); + if (!npa_event_context) + return -ENOMEM; + + rvu_reporters->npa_event_ctx = npa_event_context; + rvu_reporters->rvu_hw_npa_intr_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter); + } + + rvu_reporters->rvu_hw_npa_gen_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter); + } + + rvu_reporters->rvu_hw_npa_err_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter); + } + + rvu_reporters->rvu_hw_npa_ras_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter); + } + + rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); + if (!rvu_dl->devlink_wq) + goto err; + + INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work); + INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work); + INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work); + INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work); + + return 0; +err: + rvu_npa_health_reporters_destroy(rvu_dl); + return -ENOMEM; +} + +static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl) +{ + struct rvu *rvu = rvu_dl->rvu; + int err; + + err = rvu_npa_register_reporters(rvu_dl); + if (err) { + dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n", + err); + return err; + } + rvu_npa_register_interrupts(rvu); + + return 0; +} + +static void 
rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl) +{ + struct rvu_npa_health_reporters *npa_reporters; + struct rvu *rvu = rvu_dl->rvu; + + npa_reporters = rvu_dl->rvu_npa_health_reporter; + + if (!npa_reporters->rvu_hw_npa_ras_reporter) + return; + if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter)) + devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter); + + if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter)) + devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter); + + if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter)) + devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter); + + if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter)) + devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter); + + rvu_npa_unregister_interrupts(rvu); + kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx); + kfree(rvu_dl->rvu_npa_health_reporter); +} + +static int rvu_health_reporters_create(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl; + + rvu_dl = rvu->rvu_dl; + return rvu_npa_health_reporters_create(rvu_dl); +} + +static void rvu_health_reporters_destroy(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl; + + if (!rvu->rvu_dl) + return; + + rvu_dl = rvu->rvu_dl; + rvu_npa_health_reporters_destroy(rvu_dl); +} + +static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + return devlink_info_driver_name_put(req, DRV_NAME); +} + +static const struct devlink_ops rvu_devlink_ops = { + .info_get = rvu_devlink_info_get, +}; + +int rvu_register_dl(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl; + struct devlink *dl; + int err; + + rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL); + if (!rvu_dl) + return -ENOMEM; + + dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); + if (!dl) { + dev_warn(rvu->dev, "devlink_alloc failed\n"); + kfree(rvu_dl); + return -ENOMEM; + } + + err = devlink_register(dl, rvu->dev); + if (err) { + dev_err(rvu->dev, "devlink register failed with error %d\n", err); + devlink_free(dl); + kfree(rvu_dl); + return err; + } + + rvu_dl->dl = dl; + rvu_dl->rvu = rvu; + rvu->rvu_dl = rvu_dl; + + return rvu_health_reporters_create(rvu); +} + +void rvu_unregister_dl(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct devlink *dl = rvu_dl->dl; + + if (!dl) + return; + + rvu_health_reporters_destroy(rvu); + devlink_unregister(dl); + devlink_free(dl); + kfree(rvu_dl); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h new file mode 100644 index 000000000000..d7578fa92ac1 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RVU Devlink + * + * Copyright (C) 2020 Marvell. 
+ * + */ + +#ifndef RVU_DEVLINK_H +#define RVU_DEVLINK_H + +#define RVU_REPORTERS(_name) \ +static const struct devlink_health_reporter_ops rvu_ ## _name ## _reporter_ops = { \ + .name = #_name, \ + .recover = rvu_ ## _name ## _recover, \ + .dump = rvu_ ## _name ## _dump, \ +} + +enum npa_af_rvu_health { + NPA_AF_RVU_INTR, + NPA_AF_RVU_GEN, + NPA_AF_RVU_ERR, + NPA_AF_RVU_RAS, +}; + +struct rvu_npa_event_ctx { + u64 npa_af_rvu_int; + u64 npa_af_rvu_gen; + u64 npa_af_rvu_err; + u64 npa_af_rvu_ras; +}; + +struct rvu_npa_health_reporters { + struct rvu_npa_event_ctx *npa_event_ctx; + struct devlink_health_reporter *rvu_hw_npa_intr_reporter; + struct work_struct intr_work; + struct devlink_health_reporter *rvu_hw_npa_gen_reporter; + struct work_struct gen_work; + struct devlink_health_reporter *rvu_hw_npa_err_reporter; + struct work_struct err_work; + struct devlink_health_reporter *rvu_hw_npa_ras_reporter; + struct work_struct ras_work; +}; + +struct rvu_devlink { + struct devlink *dl; + struct rvu *rvu; + struct workqueue_struct *devlink_wq; + struct rvu_npa_health_reporters *rvu_npa_health_reporter; +}; + +/* Devlink APIs */ +int rvu_register_dl(struct rvu *rvu); +void rvu_unregister_dl(struct rvu *rvu); + +#endif /* RVU_DEVLINK_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 21a89dd76d3c..a8dfbb6d1774 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -17,6 +17,7 @@ #include "npc.h" #include "cgx.h" +static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id); @@ -68,6 +69,23 @@ struct mce { u16 pcifunc; }; +int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr) +{ + int i = 0; + + /*If blkaddr is 0, return the first nix block address*/ + if (blkaddr == 0) + return rvu->nix_blkaddr[blkaddr]; + + while (i + 1 < MAX_NIX_BLKS) { + if (rvu->nix_blkaddr[i] == blkaddr) + return rvu->nix_blkaddr[i + 1]; + i++; + } + + return 0; +} + bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -81,14 +99,16 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) int rvu_get_nixlf_count(struct rvu *rvu) { + int blkaddr = 0, max = 0; struct rvu_block *block; - int blkaddr; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return 0; - block = &rvu->hw->block[blkaddr]; - return block->lf.max; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + block = &rvu->hw->block[blkaddr]; + max += block->lf.max; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } + return max; } int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr) @@ -130,11 +150,18 @@ static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count) return idx; } -static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) +struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) { - if (blkaddr == BLKADDR_NIX0 && hw->nix0) - return hw->nix0; + int nix_blkaddr = 0, i = 0; + struct rvu *rvu = hw->rvu; + nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); + while (nix_blkaddr) { + if (blkaddr == nix_blkaddr && hw->nix) + return &hw->nix[i]; + nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr); + i++; + } return NULL; } @@ -187,8 +214,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, static int nix_interface_init(struct rvu 
*rvu, u16 pcifunc, int type, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; - int pkind, pf, vf; int err; pf = rvu_get_pf(pcifunc); @@ -221,13 +248,24 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + /* If NIX1 block is present on the silicon then NIXes are + * assigned alternatively for lbk interfaces. NIX0 should + * send packets on lbk link 1 channels and NIX1 should send + * on lbk link 0 channels for the communication between + * NIX0 and NIX1. + */ + lbkid = 0; + if (rvu->hw->lbk_links > 1) + lbkid = vf & 0x1 ? 0 : 1; + /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels.Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair. */ - pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); - pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) : - NIX_CHAN_LBK_CHX(0, vf + 1); + pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); + pfvf->tx_chan_base = vf & 0x1 ? + NIX_CHAN_LBK_CHX(lbkid, vf - 1) : + NIX_CHAN_LBK_CHX(lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, @@ -265,7 +303,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) pfvf->maxlen = 0; pfvf->minlen = 0; - pfvf->rxvlan = false; /* Remove this PF_FUNC from bcast pkt replication list */ err = nix_update_bcast_mce_list(rvu, pcifunc, false); @@ -612,8 +649,9 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return 0; } -static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, - struct nix_aq_enq_rsp *rsp) +static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; @@ -626,10 +664,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, bool ena; u64 cfg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - + blkaddr = nix_hw->blkaddr; block = &hw->block[blkaddr]; aq = block->aq; if (!aq) { @@ -669,8 +704,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, break; case NIX_AQ_CTYPE_MCE: cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG); + /* Check if index exceeds MCE list length */ - if (!hw->nix0->mcast.mce_ctx || + if (!nix_hw->mcast.mce_ctx || (req->qidx >= (256UL << (cfg & 0xF)))) rc = NIX_AF_ERR_AQ_ENQUEUE; @@ -832,6 +868,23 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, return 0; } +static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, + struct nix_aq_enq_rsp *rsp) +{ + struct nix_hw *nix_hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); +} + static const char *nix_get_ctx_name(int ctype) { switch (ctype) { @@ -1129,6 +1182,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); + /* Configure pkind for TX parse config */ + cfg = NPC_TX_DEF_PKIND; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + 
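/* Annotation (not part of the original patch): AF's own VFs have no CGX (MAC) mapping and are brought up as LBK (loopback) interfaces below; all other functions attach to a CGX LMAC. */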
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; err = nix_interface_init(rvu, pcifunc, intf, nixlf); if (err) @@ -1137,6 +1194,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Disable NPC entries as NIXLF's contexts are not initialized yet */ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + /* Configure RX VTAG Type 7 (strip) for vf vlan */ + rvu_write64(rvu, blkaddr, + NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7), + VTAGSIZE_T4 | VTAG_STRIP); + goto exit; free_mem: @@ -1164,10 +1226,14 @@ exit: cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); rsp->qints = ((cfg >> 12) & 0xFFF); rsp->cints = ((cfg >> 24) & 0xFFF); + rsp->cgx_links = hw->cgx_links; + rsp->lbk_links = hw->lbk_links; + rsp->sdp_links = hw->sdp_links; + return rc; } -int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -1186,6 +1252,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (req->flags & NIX_LF_DISABLE_FLOWS) + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + else + rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); + + /* Free any tx vtag def entries used by this NIX LF */ + if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG)) + nix_free_tx_vtag_entries(rvu, pcifunc); + nix_interface_deinit(rvu, pcifunc, nixlf); /* Reset this NIX LF */ @@ -1914,9 +1989,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, { u64 regval = req->vtag_size; - if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) + if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 || + req->vtag_size > VTAGSIZE_T8) return -EINVAL; + /* RX VTAG Type 7 reserved for vf vlan */ + if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7) + return NIX_AF_ERR_RX_VTAG_INUSE; + if (req->rx.capture_vtag) regval |= BIT_ULL(5); if (req->rx.strip_vtag) @@ -1927,9 +2007,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, return 0; } +static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, + u16 pcifunc, int index) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + + if (vlan->entry2pfvf_map[index] != pcifunc) + return NIX_AF_ERR_PARAM; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull); + + vlan->entry2pfvf_map[index] = 0; + rvu_free_rsrc(&vlan->rsrc, index); + + return 0; +} + +static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc) +{ + struct nix_txvlan *vlan; + struct nix_hw *nix_hw; + int index, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + vlan = &nix_hw->txvlan; + + mutex_lock(&vlan->rsrc_lock); + /* Scan all the entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < vlan->rsrc.max; index++) { + if (vlan->entry2pfvf_map[index] == pcifunc) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, index); + } + mutex_unlock(&vlan->rsrc_lock); +} + +static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, + u64 vtag, u8 size) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u64 regval; + int index; + + mutex_lock(&vlan->rsrc_lock); + + index = rvu_alloc_rsrc(&vlan->rsrc); + if (index < 0) { + mutex_unlock(&vlan->rsrc_lock); + return index; + } 
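+ /* Annotation (not part of the original patch): the index is reserved in the + * rsrc bitmap at this point, so the lock can be dropped before the per-index + * NIX_AF_TX_VTAG_DEFX_* CSRs are programmed; concurrent allocators can no + * longer hand out the same index. + */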
+ + mutex_unlock(&vlan->rsrc_lock); + + regval = size ? vtag : vtag << 32; + + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_DATA(index), regval); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_VTAG_DEFX_CTL(index), size); + + return index; +} + +static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u16 pcifunc = req->hdr.pcifunc; + int idx0 = req->tx.vtag0_idx; + int idx1 = req->tx.vtag1_idx; + int err = 0; + + if (req->tx.free_vtag0 && req->tx.free_vtag1) + if (vlan->entry2pfvf_map[idx0] != pcifunc || + vlan->entry2pfvf_map[idx1] != pcifunc) + return NIX_AF_ERR_PARAM; + + mutex_lock(&vlan->rsrc_lock); + + if (req->tx.free_vtag0) { + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0); + if (err) + goto exit; + } + + if (req->tx.free_vtag1) + err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1); + +exit: + mutex_unlock(&vlan->rsrc_lock); + return err; +} + +static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, + struct nix_vtag_config *req, + struct nix_vtag_config_rsp *rsp) +{ + struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); + struct nix_txvlan *vlan = &nix_hw->txvlan; + u16 pcifunc = req->hdr.pcifunc; + + if (req->tx.cfg_vtag0) { + rsp->vtag0_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag0, req->vtag_size); + + if (rsp->vtag0_idx < 0) + return NIX_AF_ERR_TX_VTAG_NOSPC; + + vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc; + } + + if (req->tx.cfg_vtag1) { + rsp->vtag1_idx = + nix_tx_vtag_alloc(rvu, blkaddr, + req->tx.vtag1, req->vtag_size); + + if (rsp->vtag1_idx < 0) + goto err_free; + + vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc; + } + + return 0; + +err_free: + if (req->tx.cfg_vtag0) + nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx); + + return NIX_AF_ERR_TX_VTAG_NOSPC; +} + int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, - struct msg_rsp *rsp) + struct nix_vtag_config_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; int blkaddr, nixlf, err; @@ -1939,19 +2159,28 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, return err; if (req->cfg_type) { + /* rx vtag configuration */ err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req); if (err) return NIX_AF_ERR_PARAM; } else { - /* TODO: handle tx vtag configuration */ - return 0; + /* tx vtag configuration */ + if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) && + (req->tx.free_vtag0 || req->tx.free_vtag1)) + return NIX_AF_ERR_PARAM; + + if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1) + return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp); + + if (req->tx.free_vtag0 || req->tx.free_vtag1) + return nix_tx_vtag_decfg(rvu, blkaddr, req); } return 0; } -static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, - u16 pcifunc, int next, bool eol) +static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw, + int mce, u8 op, u16 pcifunc, int next, bool eol) { struct nix_aq_enq_req aq_req; int err; @@ -1971,7 +2200,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, /* All fields valid */ *(u64 *)(&aq_req.mce_mask) = ~0ULL; - err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL); + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL); if (err) { dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n", rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); @@ -2077,9 +2306,9 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) next_idx = idx + 1; /* EOL should be set in last MCE */ - err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE, - 
mce->pcifunc, next_idx, - (next_idx > last_idx) ? true : false); + err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE, + mce->pcifunc, next_idx, + (next_idx > last_idx) ? true : false); if (err) goto end; idx++; @@ -2108,6 +2337,11 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) numvfs = (cfg >> 12) & 0xFF; pfvf = &rvu->pf[pf]; + + /* This NIX0/1 block mapped to PF ? */ + if (pfvf->nix_blkaddr != nix_hw->blkaddr) + continue; + /* Save the start MCE */ pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1); @@ -2122,9 +2356,10 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw) * Will be updated when a NIXLF is attached/detached to * these PF/VFs. */ - err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx, - NIX_AQ_INSTOP_INIT, - pcifunc, 0, true); + err = nix_blk_setup_mce(rvu, nix_hw, + pfvf->bcast_mce_idx + idx, + NIX_AQ_INSTOP_INIT, + pcifunc, 0, true); if (err) return err; } @@ -2176,6 +2411,31 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) return nix_setup_bcast_tables(rvu, nix_hw); } +static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw) +{ + struct nix_txvlan *vlan = &nix_hw->txvlan; + int err; + + /* Allocate resource bimap for tx vtag def registers*/ + vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX; + err = rvu_alloc_bitmap(&vlan->rsrc); + if (err) + return -ENOMEM; + + /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ + vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max, + sizeof(u16), GFP_KERNEL); + if (!vlan->entry2pfvf_map) + goto free_mem; + + mutex_init(&vlan->rsrc_lock); + return 0; + +free_mem: + kfree(vlan->rsrc.bmap); + return -ENOMEM; +} + static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { struct nix_txsch *txsch; @@ -2366,6 +2626,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; break; + case NIX_FLOW_KEY_TYPE_IPV4_PROTO: + field->lid = NPC_LID_LC; + field->hdr_offset = 9; /* offset */ + field->bytesm1 = 0; /* 1 byte */ + field->ltype_match = NPC_LT_LC_IP; + field->ltype_mask = 0xF; + break; case NIX_FLOW_KEY_TYPE_IPV4: case NIX_FLOW_KEY_TYPE_INNR_IPV4: field->lid = NPC_LID_LC; @@ -2680,6 +2947,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { + bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK; u16 pcifunc = req->hdr.pcifunc; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; @@ -2690,13 +2958,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, pfvf = rvu_get_pfvf(rvu, pcifunc); + /* VF can't overwrite admin(PF) changes */ + if (from_vf && pfvf->pf_set_vf_cfg) + return -EPERM; + ether_addr_copy(pfvf->mac_addr, req->mac_addr); rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - return 0; } @@ -2743,9 +3013,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, allmulti); - - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - return 0; } @@ -2882,65 +3149,6 @@ linkcfg: return 0; } -int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) -{ - struct npc_mcam_alloc_entry_req alloc_req = { }; - struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; - struct npc_mcam_free_entry_req free_req = { }; - u16 pcifunc = 
req->hdr.pcifunc; - int blkaddr, nixlf, err; - struct rvu_pfvf *pfvf; - - /* LBK VFs do not have separate MCAM UCAST entry hence - * skip allocating rxvlan for them - */ - if (is_afvf(pcifunc)) - return 0; - - pfvf = rvu_get_pfvf(rvu, pcifunc); - if (pfvf->rxvlan) - return 0; - - /* alloc new mcam entry */ - alloc_req.hdr.pcifunc = pcifunc; - alloc_req.count = 1; - - err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, - &alloc_rsp); - if (err) - return err; - - /* update entry to enable rxvlan offload */ - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (blkaddr < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) { - err = NIX_AF_ERR_AF_LF_INVALID; - goto free_entry; - } - - pfvf->rxvlan_index = alloc_rsp.entry_list[0]; - /* all it means is that rxvlan_index is valid */ - pfvf->rxvlan = true; - - err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); - if (err) - goto free_entry; - - return 0; -free_entry: - free_req.hdr.pcifunc = pcifunc; - free_req.entry = alloc_rsp.entry_list[0]; - rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); - pfvf->rxvlan = false; - return err; -} - int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, struct msg_rsp *rsp) { @@ -3109,17 +3317,15 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) return 0; } -int rvu_nix_init(struct rvu *rvu) +static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) { const struct npc_lt_def_cfg *ltdefs; struct rvu_hwinfo *hw = rvu->hw; + int blkaddr = nix_hw->blkaddr; struct rvu_block *block; - int blkaddr, err; + int err; u64 cfg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return 0; block = &hw->block[blkaddr]; if (is_rvu_96xx_B0(rvu)) { @@ -3153,7 +3359,7 @@ int rvu_nix_init(struct rvu *rvu) hw->cgx = (cfg >> 12) & 0xF; hw->lmac_per_cgx = (cfg >> 8) & 0xF; hw->cgx_links = hw->cgx * hw->lmac_per_cgx; - hw->lbk_links = 1; + hw->lbk_links = (cfg >> 24) & 0xF; hw->sdp_links = 1; /* Initialize admin queue */ @@ -3164,26 +3370,25 @@ int rvu_nix_init(struct rvu *rvu) /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); - if (blkaddr == BLKADDR_NIX0) { - hw->nix0 = devm_kzalloc(rvu->dev, - sizeof(struct nix_hw), GFP_KERNEL); - if (!hw->nix0) - return -ENOMEM; + if (is_block_implemented(hw, blkaddr)) { + err = nix_setup_txschq(rvu, nix_hw, blkaddr); + if (err) + return err; - err = nix_setup_txschq(rvu, hw->nix0, blkaddr); + err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr); if (err) return err; - err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr); + err = nix_setup_mcast(rvu, nix_hw, blkaddr); if (err) return err; - err = nix_setup_mcast(rvu, hw->nix0, blkaddr); + err = nix_setup_txvlan(rvu, nix_hw); if (err) return err; /* Configure segmentation offload formats */ - nix_setup_lso(rvu, hw->nix0, blkaddr); + nix_setup_lso(rvu, nix_hw, blkaddr); /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. 
* This helps HW protocol checker to identify headers @@ -3236,23 +3441,45 @@ int rvu_nix_init(struct rvu *rvu) return 0; } -void rvu_nix_freemem(struct rvu *rvu) +int rvu_nix_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; - struct rvu_block *block; + struct nix_hw *nix_hw; + int blkaddr = 0, err; + int i = 0; + + hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw), + GFP_KERNEL); + if (!hw->nix) + return -ENOMEM; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + nix_hw = &hw->nix[i]; + nix_hw->rvu = rvu; + nix_hw->blkaddr = blkaddr; + err = rvu_nix_block_init(rvu, nix_hw); + if (err) + return err; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + i++; + } + + return 0; +} + +static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, + struct rvu_block *block) +{ struct nix_txsch *txsch; struct nix_mcast *mcast; + struct nix_txvlan *vlan; struct nix_hw *nix_hw; - int blkaddr, lvl; + int lvl; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); - if (blkaddr < 0) - return; - - block = &hw->block[blkaddr]; rvu_aq_free(rvu, block->aq); - if (blkaddr == BLKADDR_NIX0) { + if (is_block_implemented(rvu->hw, blkaddr)) { nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) return; @@ -3262,6 +3489,11 @@ void rvu_nix_freemem(struct rvu *rvu) kfree(txsch->schq.bmap); } + vlan = &nix_hw->txvlan; + kfree(vlan->rsrc.bmap); + mutex_destroy(&vlan->rsrc_lock); + devm_kfree(rvu->dev, vlan->entry2pfvf_map); + mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); @@ -3269,6 +3501,20 @@ void rvu_nix_freemem(struct rvu *rvu) } } +void rvu_nix_freemem(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + int blkaddr = 0; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + block = &hw->block[blkaddr]; + rvu_nix_block_freemem(rvu, blkaddr, block); + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } +} + int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -3281,6 +3527,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); + npc_mcam_enable_flows(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } @@ -3296,6 +3544,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + npc_mcam_disable_flows(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, false); } @@ -3308,6 +3558,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) ctx_req.hdr.pcifunc = pcifunc; /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ + rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf); nix_interface_deinit(rvu, pcifunc, nixlf); nix_rx_sync(rvu, blkaddr); nix_txschq_free(rvu, pcifunc); @@ -3431,3 +3683,12 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, return 0; } + +void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) +{ + bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + + /* overwrite vf mac address with default_mac */ + if (from_vf) + ether_addr_copy(pfvf->mac_addr, pfvf->default_mac); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 67471cb2b129..24c2bfdfec4e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -497,18 +497,14 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) int rvu_npa_init(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; - int blkaddr, err; + int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0); if (blkaddr < 0) return 0; /* Initialize admin queue */ - err = npa_aq_init(rvu, &hw->block[blkaddr]); - if (err) - return err; - - return 0; + return npa_aq_init(rvu, &hw->block[blkaddr]); } void rvu_npa_freemem(struct rvu *rvu) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 511b01dd03ed..5cf9b7a907ae 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -28,6 +28,8 @@ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 #define NPC_HW_TSTAMP_OFFSET 8 +#define NPC_KEX_CHAN_MASK 0xFFFULL +#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL static const char def_pfl_name[] = "default"; @@ -36,6 +38,81 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, u16 pcifunc); +bool is_npc_intf_tx(u8 intf) +{ + return !!(intf & 0x1); +} + +bool is_npc_intf_rx(u8 intf) +{ + return !(intf & 0x1); +} + +bool is_npc_interface_valid(struct rvu *rvu, u8 intf) +{ + struct rvu_hwinfo *hw = rvu->hw; + + return intf < hw->npc_intfs; +} + +int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) +{ + /* Due to a HW issue in these silicon versions, parse nibble enable + * configuration has to be identical for both Rx and Tx interfaces. + */ + if (is_rvu_96xx_B0(rvu)) + return nibble_ena; + return 0; +} + +static int npc_mcam_verify_pf_func(struct rvu *rvu, + struct mcam_entry *entry_data, u8 intf, + u16 pcifunc) +{ + u16 pf_func, pf_func_mask; + + if (is_npc_intf_rx(intf)) + return 0; + + pf_func_mask = (entry_data->kw_mask[0] >> 32) & + NPC_KEX_PF_FUNC_MASK; + pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; + + pf_func = be16_to_cpu((__force __be16)pf_func); + if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || + ((pf_func & ~RVU_PFVF_FUNC_MASK) != + (pcifunc & ~RVU_PFVF_FUNC_MASK))) + return -EINVAL; + + return 0; +} + +int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) +{ + int pf = rvu_get_pf(pcifunc); + u8 cgx_id, lmac_id; + int base = 0, end; + + if (is_npc_intf_tx(intf)) + return 0; + + if (is_afvf(pcifunc)) { + end = rvu_get_num_lbk_chans(); + if (end < 0) + return -EINVAL; + } else { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0); + /* CGX mapped functions has maximum of 16 channels */ + end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF); + } + + if (channel < base || channel > end) + return -EINVAL; + + return 0; +} + void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -94,6 +171,31 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable) return 0; } +static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc, + int nixlf) +{ + struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam); + struct rvu *rvu = hw->rvu; + int blkaddr = 0, max = 0; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* Given a PF/VF and NIX LF number calculate the unicast mcam + * entry index based on the NIX block assigned to the PF/VF. 
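+ * (Annotation, not part of the original patch: with a second NIX block the + * NIX1-attached LFs are numbered after all NIX0 LF slots, which is what the + * running 'max' accumulation below computes.)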
+ */ + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + if (pfvf->nix_blkaddr == blkaddr) + break; + block = &rvu->hw->block[blkaddr]; + max += block->lf.max; + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } + + return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF; +} + static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type) { @@ -114,10 +216,10 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, return index + 1; } - return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF)); + return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf); } -static int npc_get_bank(struct npc_mcam *mcam, int index) +int npc_get_bank(struct npc_mcam *mcam, int index) { int bank = index / mcam->banksize; @@ -139,8 +241,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, return (cfg & 1); } -static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, - int blkaddr, int index, bool enable) +void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, bool enable) { int bank = npc_get_bank(mcam, index); int actbank = bank; @@ -257,6 +359,93 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx, *cam0 = ~*cam1 & kw_mask; } +static void npc_fill_entryword(struct mcam_entry *entry, int idx, + u64 cam0, u64 cam1) +{ + /* Similar to npc_get_keyword, but fills mcam_entry structure from + * CAM registers. + */ + switch (idx) { + case 0: + entry->kw[0] = cam1; + entry->kw_mask[0] = cam1 ^ cam0; + break; + case 1: + entry->kw[1] = cam1; + entry->kw_mask[1] = cam1 ^ cam0; + break; + case 2: + entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48; + entry->kw[2] = (cam1 >> 16) & CAM_MASK(48); + entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; + entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48); + break; + case 3: + entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48; + entry->kw[3] = (cam1 >> 16) & CAM_MASK(32); + entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48; + entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32); + break; + case 4: + entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32; + entry->kw[4] = (cam1 >> 32) & CAM_MASK(32); + entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; + entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32); + break; + case 5: + entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32; + entry->kw[5] = (cam1 >> 32) & CAM_MASK(16); + entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32; + entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16); + break; + case 6: + entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16; + entry->kw[6] = (cam1 >> 48) & CAM_MASK(16); + entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; + entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16); + break; + case 7: + entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16; + entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16; + break; + } +} + +static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, int index, + struct mcam_entry *entry) +{ + u16 owner, target_func; + struct rvu_pfvf *pfvf; + int bank, nixlf; + u64 rx_action; + + owner = mcam->entry2pfvf_map[index]; + target_func = (entry->action >> 4) & 0xffff; + /* return incase target is PF or LBK or rule owner is not PF */ + if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || + !(target_func & RVU_PFVF_FUNC_MASK)) + return; + + pfvf = rvu_get_pfvf(rvu, target_func); + mcam->entry2target_pffunc[index] = target_func; + /* return 
if nixlf is not attached or initialized */ + if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule) + return; + + /* get VF ucast entry rule */ + nix_get_nixlf(rvu, target_func, &nixlf, NULL); + index = npc_get_nixlf_mcam_index(mcam, target_func, + nixlf, NIXLF_UCAST_ENTRY); + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + rx_action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); + if (rx_action) + entry->action = rx_action; +} + static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index, u8 intf, struct mcam_entry *entry, bool enable) @@ -304,6 +493,11 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0); } + /* copy VF default entry action to the VF mcam entry */ + if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries) + npc_get_default_entry_action(rvu, mcam, blkaddr, actindex, + entry); + /* Set 'action' */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action); @@ -317,6 +511,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true); } +void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 src, + struct mcam_entry *entry, u8 *intf, u8 *ena) +{ + int sbank = npc_get_bank(mcam, src); + int bank, kw = 0; + u64 cam0, cam1; + + src &= (mcam->banksize - 1); + bank = sbank; + + for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) { + cam1 = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1)); + cam0 = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0)); + npc_fill_entryword(entry, kw, cam0, cam1); + + cam1 = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1)); + cam0 = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0)); + npc_fill_entryword(entry, kw + 1, cam0, cam1); + } + + entry->action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); + entry->vtag_action = + rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); + *intf = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3; + *ena = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1; +} + static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, u16 dest) { @@ -371,11 +601,11 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; struct npc_mcam *mcam = &rvu->hw->mcam; - struct mcam_entry entry = { {0} }; struct nix_rx_action action; - int blkaddr, index, kwi; - u64 mac = 0; + int blkaddr, index; /* AF's VFs work in promiscuous mode */ if (is_afvf(pcifunc)) @@ -385,20 +615,9 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, if (blkaddr < 0) return; - for (index = ETH_ALEN - 1; index >= 0; index--) - mac |= ((u64)*mac_addr++) << (8 * index); - index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); - /* Match ingress channel and DMAC */ - entry.kw[0] = chan; - entry.kw_mask[0] = 0xFFFULL; - - kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64); - entry.kw[kwi] = mac; - entry.kw_mask[kwi] = BIT_ULL(48) - 1; - /* Don't change the action if entry is already enabled * Otherwise RSS action may get overwritten. 
*/ @@ -411,25 +630,26 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, action.pf_func = pcifunc; } - entry.action = *(u64 *)&action; - npc_config_mcam_entry(rvu, mcam, blkaddr, index, - NIX_INTF_RX, &entry, true); - - /* add VLAN matching, setup action and save entry back for later */ - entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20; - entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20; + req.default_rule = 1; + ether_addr_copy(req.packet.dmac, mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.features = BIT_ULL(NPC_DMAC); + req.channel = chan; + req.intf = pfvf->nix_rx_intf; + req.op = action.op; + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = action.pf_func; + req.index = action.index; + req.match_id = action.match_id; + req.flow_key_alg = action.flow_key_alg; - entry.vtag_action = VTAG0_VALID_BIT | - FIELD_PREP(VTAG0_TYPE_MASK, 0) | - FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) | - FIELD_PREP(VTAG0_RELPTR_MASK, 12); - - memcpy(&pfvf->entry, &entry, sizeof(entry)); + rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, bool allmulti) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, ucast_idx, index, kwi; struct mcam_entry entry = { {0} }; @@ -473,7 +693,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, entry.action = *(u64 *)&action; npc_config_mcam_entry(rvu, mcam, blkaddr, index, - NIX_INTF_RX, &entry, true); + pfvf->nix_rx_intf, &entry, true); } static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, @@ -531,6 +751,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, /* Get 'pcifunc' of PF device */ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK; + pfvf = rvu_get_pfvf(rvu, pcifunc); index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); @@ -553,14 +774,13 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, action.op = NIX_RX_ACTIONOP_UCAST; action.pf_func = pcifunc; } else { - pfvf = rvu_get_pfvf(rvu, pcifunc); action.index = pfvf->bcast_mce_idx; action.op = NIX_RX_ACTIONOP_MCAST; } entry.action = *(u64 *)&action; npc_config_mcam_entry(rvu, mcam, blkaddr, index, - NIX_INTF_RX, &entry, true); + pfvf->nix_rx_intf, &entry, true); } void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) @@ -579,12 +799,47 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); } +static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc, u64 rx_action) +{ + int actindex, index, bank; + bool enable; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + return; + + mutex_lock(&mcam->lock); + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2target_pffunc[index] == pcifunc) { + bank = npc_get_bank(mcam, index); + actindex = index; + index &= (mcam->banksize - 1); + + /* read vf flow entry enable status */ + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, + actindex); + /* disable before mcam entry update */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, + false); + /* update 'action' */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), + rx_action); + if (enable) + npc_enable_mcam_entry(rvu, mcam, blkaddr, + actindex, true); + } + } + mutex_unlock(&mcam->lock); +} + void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int 
nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_rx_action action; int blkaddr, index, bank; + struct rvu_pfvf *pfvf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -621,6 +876,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); + /* update the VF flow rule action with the VF default entry action */ + if (mcam_index < 0) + npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc, + *(u64 *)&action); + + /* update the action change in default rule */ + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (pfvf->def_ucast_rule) + pfvf->def_ucast_rule->rx_action = action; + index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); @@ -635,8 +900,6 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); } - - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); } static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, @@ -688,8 +951,6 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf); else rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); - - rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); } void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) @@ -704,7 +965,41 @@ void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + mutex_lock(&mcam->lock); + + /* Disable MCAM entries directing traffic to this 'pcifunc' */ + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (is_npc_intf_rx(rule->intf) && + rule->rx_action.pf_func == pcifunc) { + npc_enable_mcam_entry(rvu, mcam, blkaddr, + rule->entry, false); + rule->enable = false; + /* Indicate that default rule is disabled */ + if (rule->default_rule) + pfvf->def_ucast_rule = NULL; + } + } + + mutex_unlock(&mcam->lock); + + npc_mcam_disable_flows(rvu, pcifunc); + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); +} + +void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule, *tmp; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -713,12 +1008,20 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) mutex_lock(&mcam->lock); - /* Disable and free all MCAM entries mapped to this 'pcifunc' */ + /* Free all MCAM entries owned by this 'pcifunc' */ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); - /* Free all MCAM counters mapped to this 'pcifunc' */ + /* Free all MCAM counters owned by this 'pcifunc' */ npc_mcam_free_all_counters(rvu, mcam, pcifunc); + /* Delete MCAM entries owned by this 'pcifunc' */ + list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) { + if (rule->owner == pcifunc && !rule->default_rule) { + list_del(&rule->list); + kfree(rule); + } + } + mutex_unlock(&mcam->lock); rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); @@ -732,44 +1035,78 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) rvu_write64(rvu, blkaddr, \ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) 
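[Editor's sketch, not part of the patch.] The hunk below replaces the old npc_program_mkex_profile(), which hardcoded NIX_INTF_RX and NIX_INTF_TX, with per-interface RX/TX helpers iterated over hw->npc_intfs. A minimal standalone model of the resulting dispatch: the even/odd interface numbering follows is_npc_intf_rx()/is_npc_intf_tx() from earlier in this patch, the helper names are illustrative, the four-interface count assumes a part with two NIX blocks, and the register writes are stubbed with printf.

#include <stdio.h>
#include <stdbool.h>

/* Mirrors is_npc_intf_tx(): odd NPC interfaces are TX, even are RX. */
static bool is_intf_tx(unsigned int intf) { return intf & 0x1; }

static void program_mkex_rx(unsigned int intf)
{
	if (is_intf_tx(intf))
		return;
	/* Stand-in for the NPC_AF_INTFX_KEX_CFG and SET_KEX_LD writes;
	 * every RX interface is fed from the NIX_INTF_RX profile slice.
	 */
	printf("intf %u: program RX key extraction\n", intf);
}

static void program_mkex_tx(unsigned int intf)
{
	if (!is_intf_tx(intf))
		return;
	printf("intf %u: program TX key extraction\n", intf);
}

int main(void)
{
	/* Assumed layout: NIX0_RX, NIX0_TX, NIX1_RX, NIX1_TX */
	unsigned int npc_intfs = 4;

	for (unsigned int intf = 0; intf < npc_intfs; intf++) {
		program_mkex_rx(intf);
		program_mkex_tx(intf);
	}
	return 0;
}

Note that in the real hunk all RX interfaces are programmed from the single NIX_INTF_RX slice of the mkex profile (likewise all TX from NIX_INTF_TX), which is why the split helpers keep indexing intf_lid_lt_ld[NIX_INTF_RX] and [NIX_INTF_TX] rather than [intf].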
-static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, - const struct npc_mcam_kex *mkex) +static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr, + struct npc_mcam_kex *mkex, u8 intf) { int lid, lt, ld, fl; - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), - mkex->keyx_cfg[NIX_INTF_RX]); - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), - mkex->keyx_cfg[NIX_INTF_TX]); + if (is_npc_intf_tx(intf)) + return; - for (ld = 0; ld < NPC_MAX_LD; ld++) - rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), - mkex->kex_ld_flags[ld]); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), + mkex->keyx_cfg[NIX_INTF_RX]); + /* Program LDATA */ for (lid = 0; lid < NPC_MAX_LID; lid++) { for (lt = 0; lt < NPC_MAX_LT; lt++) { - for (ld = 0; ld < NPC_MAX_LD; ld++) { - SET_KEX_LD(NIX_INTF_RX, lid, lt, ld, + for (ld = 0; ld < NPC_MAX_LD; ld++) + SET_KEX_LD(intf, lid, lt, ld, mkex->intf_lid_lt_ld[NIX_INTF_RX] [lid][lt][ld]); - - SET_KEX_LD(NIX_INTF_TX, lid, lt, ld, - mkex->intf_lid_lt_ld[NIX_INTF_TX] - [lid][lt][ld]); - } } } - + /* Program LFLAGS */ for (ld = 0; ld < NPC_MAX_LD; ld++) { - for (fl = 0; fl < NPC_MAX_LFL; fl++) { - SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl, + for (fl = 0; fl < NPC_MAX_LFL; fl++) + SET_KEX_LDFLAGS(intf, ld, fl, mkex->intf_ld_flags[NIX_INTF_RX] [ld][fl]); + } +} - SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl, +static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr, + struct npc_mcam_kex *mkex, u8 intf) +{ + int lid, lt, ld, fl; + + if (is_npc_intf_rx(intf)) + return; + + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), + mkex->keyx_cfg[NIX_INTF_TX]); + + /* Program LDATA */ + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) + SET_KEX_LD(intf, lid, lt, ld, + mkex->intf_lid_lt_ld[NIX_INTF_TX] + [lid][lt][ld]); + } + } + /* Program LFLAGS */ + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) + SET_KEX_LDFLAGS(intf, ld, fl, mkex->intf_ld_flags[NIX_INTF_TX] [ld][fl]); - } + } +} + +static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, + struct npc_mcam_kex *mkex) +{ + struct rvu_hwinfo *hw = rvu->hw; + u8 intf; + int ld; + + for (ld = 0; ld < NPC_MAX_LD; ld++) + rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), + mkex->kex_ld_flags[ld]); + + for (intf = 0; intf < hw->npc_intfs; intf++) { + npc_program_mkex_rx(rvu, blkaddr, mkex, intf); + npc_program_mkex_tx(rvu, blkaddr, mkex, intf); } } @@ -909,7 +1246,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu, kpu, profile->cam_entries, profile->action_entries); } - max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF; + max_entries = rvu->hw->npc_kpu_entries; /* Program CAM match entries for previous KPU extracted data */ num_entries = min_t(int, profile->cam_entries, max_entries); @@ -964,9 +1301,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr) int num_pkinds, num_kpus, idx; struct npc_pkind *pkind; - /* Get HW limits */ - hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F; - /* Disable all KPUs and their entries */ for (idx = 0; idx < hw->npc_kpus; idx++) { rvu_write64(rvu, blkaddr, @@ -1005,12 +1339,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) int rsvd, err; u64 cfg; - /* Get HW limits */ - cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); - mcam->banks = (cfg >> 44) & 0xF; - mcam->banksize = (cfg >> 28) & 0xFFFF; - mcam->counters.max = (cfg >> 48) & 0xFFFF; - /* Actual number of 
MCAM entries vary by entry size */ cfg = (rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07; @@ -1077,12 +1405,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->hprio_count = mcam->lprio_count; mcam->hprio_end = mcam->hprio_count; - /* Reserve last counter for MCAM RX miss action which is set to - * drop pkt. This way we will know how many pkts didn't match - * any MCAM entry. - */ - mcam->counters.max--; - mcam->rx_miss_act_cntr = mcam->counters.max; /* Allocate bitmap for managing MCAM counters and memory * for saving counter to RVU PFFUNC allocation mapping. @@ -1109,6 +1431,12 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (!mcam->cntr_refcnt) goto free_mem; + /* Alloc memory for saving target device of mcam rule */ + mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2target_pffunc) + goto free_mem; + mutex_init(&mcam->lock); return 0; @@ -1118,12 +1446,110 @@ free_mem: return -ENOMEM; } +static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) +{ + struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + u64 npc_const, npc_const1; + u64 npc_const2 = 0; + + npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1); + if (npc_const1 & BIT_ULL(63)) + npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2); + + pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL; + hw->npc_kpu_entries = npc_const1 & 0xFFFULL; + hw->npc_kpus = (npc_const >> 8) & 0x1FULL; + hw->npc_intfs = npc_const & 0xFULL; + hw->npc_counters = (npc_const >> 48) & 0xFFFFULL; + + mcam->banks = (npc_const >> 44) & 0xFULL; + mcam->banksize = (npc_const >> 28) & 0xFFFFULL; + /* Extended set */ + if (npc_const2) { + hw->npc_ext_set = true; + hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; + mcam->banksize = npc_const2 & 0xFFFFULL; + } + + mcam->counters.max = hw->npc_counters; +} + +static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + u64 nibble_ena, rx_kex, tx_kex; + u8 intf; + + /* Reserve last counter for MCAM RX miss action which is set to + * drop packet. This way we will know how many pkts didn't match + * any MCAM entry. + */ + mcam->counters.max--; + mcam->rx_miss_act_cntr = mcam->counters.max; + + rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX]; + tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX]; + nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); + + nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena); + if (nibble_ena) { + tx_kex &= ~NPC_PARSE_NIBBLE; + tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); + npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex; + } + + /* Configure RX interfaces */ + for (intf = 0; intf < hw->npc_intfs; intf++) { + if (is_npc_intf_tx(intf)) + continue; + + /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), + rx_kex); + + /* If MCAM lookup doesn't result in a match, drop the received + * packet. And map this action to a counter to count dropped + * packets. 
+ */ + rvu_write64(rvu, blkaddr, + NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP); + + /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9] + * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0] + */ + rvu_write64(rvu, blkaddr, + NPC_AF_INTFX_MISS_STAT_ACT(intf), + ((mcam->rx_miss_act_cntr >> 9) << 12) | + BIT_ULL(9) | mcam->rx_miss_act_cntr); + } + + /* Configure TX interfaces */ + for (intf = 0; intf < hw->npc_intfs; intf++) { + if (is_npc_intf_rx(intf)) + continue; + + /* Extract Ltypes LID_LA to LID_LE */ + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf), + tx_kex); + + /* Set TX miss action to UCAST_DEFAULT i.e + * transmit the packet on NIX LF SQ's default channel. + */ + rvu_write64(rvu, blkaddr, + NPC_AF_INTFX_MISS_ACT(intf), + NIX_TX_ACTIONOP_UCAST_DEFAULT); + } +} + int rvu_npc_init(struct rvu *rvu) { struct npc_kpu_profile_adapter *kpu = &rvu->kpu; struct npc_pkind *pkind = &rvu->hw->pkind; struct npc_mcam *mcam = &rvu->hw->mcam; - u64 cfg, nibble_ena, rx_kex, tx_kex; int blkaddr, entry, bank, err; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -1132,17 +1558,15 @@ int rvu_npc_init(struct rvu *rvu) return -ENODEV; } + rvu_npc_hw_init(rvu, blkaddr); + /* First disable all MCAM entries, to stop traffic towards NIXLFs */ - cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); - for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) { - for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++) + for (bank = 0; bank < mcam->banks; bank++) { + for (entry = 0; entry < mcam->banksize; entry++) rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); } - /* Allocate resource bimap for pkind*/ - pkind->rsrc.max = (rvu_read64(rvu, blkaddr, - NPC_AF_CONST1) >> 12) & 0xFF; err = rvu_alloc_bitmap(&pkind->rsrc); if (err) return err; @@ -1180,42 +1604,21 @@ int rvu_npc_init(struct rvu *rvu) BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); - /* Set RX and TX side MCAM search key size. - * LA..LD (ltype only) + Channel - */ - rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX]; - tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX]; - nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex); - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex); - /* Due to an errata (35786) in A0 pass silicon, parse nibble enable - * configuration has to be identical for both Rx and Tx interfaces. - */ - if (is_rvu_96xx_B0(rvu)) { - tx_kex &= ~NPC_PARSE_NIBBLE; - tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena); - } - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex); - - err = npc_mcam_rsrcs_init(rvu, blkaddr); - if (err) - return err; + rvu_npc_setup_interfaces(rvu, blkaddr); /* Configure MKEX profile */ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name); - /* Set TX miss action to UCAST_DEFAULT i.e - * transmit the packet on NIX LF SQ's default channel. - */ - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX), - NIX_TX_ACTIONOP_UCAST_DEFAULT); + err = npc_mcam_rsrcs_init(rvu, blkaddr); + if (err) + return err; - /* If MCAM lookup doesn't result in a match, drop the received packet. - * And map this action to a counter to count dropped pkts. 
- */ - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX), - NIX_RX_ACTIONOP_DROP); - rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX), - BIT_ULL(9) | mcam->rx_miss_act_cntr); + err = npc_flow_steering_init(rvu, blkaddr); + if (err) { + dev_err(rvu->dev, + "Incorrect mkex profile loaded, using default mkex\n"); + npc_load_mkex_profile(rvu, blkaddr, def_pfl_name); + } return 0; } @@ -1307,10 +1710,13 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, /* Set mapping and increment counter's refcnt */ mcam->entry2cntr_map[entry] = cntr; mcam->cntr_refcnt[cntr]++; - /* Enable stats */ + /* Enable stats + * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9] + * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0] + */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), - BIT_ULL(9) | cntr); + ((cntr >> 9) << 12) | BIT_ULL(9) | cntr); } static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, @@ -1380,6 +1786,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, cntr); + mcam->entry2target_pffunc[index] = 0x0; } } } @@ -1766,6 +2173,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, goto exit; mcam->entry2pfvf_map[req->entry] = 0; + mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); @@ -1785,18 +2193,49 @@ exit: return rc; } +int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu, + struct npc_mcam_read_entry_req *req, + struct npc_mcam_read_entry_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + if (!rc) { + npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry, + &rsp->entry_data, + &rsp->intf, &rsp->enable); + } + + mutex_unlock(&mcam->lock); + return rc; +} + int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, struct npc_mcam_write_entry_req *req, struct msg_rsp *rsp) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; + u16 channel, chan_mask; int blkaddr, rc; + u8 nix_intf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; + chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; + channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; + channel &= chan_mask; + mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) @@ -1808,12 +2247,28 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, goto exit; } - if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) { + if (!is_npc_interface_valid(rvu, req->intf)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } - npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf, + if (is_npc_intf_tx(req->intf)) + nix_intf = pfvf->nix_tx_intf; + else + nix_intf = pfvf->nix_rx_intf; + + if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, + pcifunc)) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf, &req->entry_data, req->enable_entry); if (req->set_cntr) @@ -2141,6 +2596,7 @@ int 
rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam_alloc_and_write_entry_req *req, struct npc_mcam_alloc_and_write_entry_rsp *rsp) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam_alloc_counter_req cntr_req; struct npc_mcam_alloc_counter_rsp cntr_rsp; struct npc_mcam_alloc_entry_req entry_req; @@ -2148,13 +2604,26 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 entry = NPC_MCAM_ENTRY_INVALID; u16 cntr = NPC_MCAM_ENTRY_INVALID; + u16 channel, chan_mask; int blkaddr, rc; + u8 nix_intf; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) + if (!is_npc_interface_valid(rvu, req->intf)) + return NPC_MCAM_INVALID_REQ; + + chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; + channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; + channel &= chan_mask; + + if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) + return NPC_MCAM_INVALID_REQ; + + if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, + req->hdr.pcifunc)) return NPC_MCAM_INVALID_REQ; /* Try to allocate a MCAM entry */ @@ -2196,7 +2665,13 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, write_entry: mutex_lock(&mcam->lock); - npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf, + + if (is_npc_intf_tx(req->intf)) + nix_intf = pfvf->nix_tx_intf; + else + nix_intf = pfvf->nix_rx_intf; + + npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf, &req->entry_data, req->enable_entry); if (req->alloc_cntr) @@ -2255,26 +2730,72 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf) +bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf, + u16 pcifunc, u8 intf, struct mcam_entry *entry, + int *index) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; - int blkaddr, index; bool enable; + u8 nix_intf; + + if (is_npc_intf_tx(intf)) + nix_intf = pfvf->nix_tx_intf; + else + nix_intf = pfvf->nix_rx_intf; + + *index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + /* don't force enable unicast entry */ + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index); + npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf, + entry, enable); + + return enable; +} + +int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, + struct msg_req *req, + struct npc_mcam_read_base_rule_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int index, blkaddr, nixlf, rc = 0; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_pfvf *pfvf; + u8 intf, enable; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + return NPC_MCAM_INVALID_REQ; - if (!pfvf->rxvlan) - return 0; + /* Return the channel number in case of PF */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + pfvf = rvu_get_pfvf(rvu, pcifunc); + rsp->entry.kw[0] = pfvf->rx_chan_base; + rsp->entry.kw_mask[0] = 0xFFFULL; + goto out; + } + /* Find the pkt steering rule installed by PF to this VF */ + mutex_lock(&mcam->lock); + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2target_pffunc[index] == pcifunc) + goto read_entry; + } + + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (rc < 0) { + mutex_unlock(&mcam->lock); + goto out; + } + /* Read the default 
ucast entry if there is no pkt steering rule */ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); - pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index); - enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index); - npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index, - NIX_INTF_RX, &pfvf->entry, enable); - - return 0; +read_entry: + /* Read the mcam entry */ + npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf, + &enable); + mutex_unlock(&mcam->lock); +out: + return rc; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c new file mode 100644 index 000000000000..14832b66d1fe --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -0,0 +1,1336 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. + */ + +#include <linux/bitfield.h> + +#include "rvu_struct.h" +#include "rvu_reg.h" +#include "rvu.h" +#include "npc.h" + +#define NPC_BYTESM GENMASK_ULL(19, 16) +#define NPC_HDR_OFFSET GENMASK_ULL(15, 8) +#define NPC_KEY_OFFSET GENMASK_ULL(5, 0) +#define NPC_LDATA_EN BIT_ULL(7) + +static const char * const npc_flow_names[] = { + [NPC_DMAC] = "dmac", + [NPC_SMAC] = "smac", + [NPC_ETYPE] = "ether type", + [NPC_OUTER_VID] = "outer vlan id", + [NPC_TOS] = "tos", + [NPC_SIP_IPV4] = "ipv4 source ip", + [NPC_DIP_IPV4] = "ipv4 destination ip", + [NPC_SIP_IPV6] = "ipv6 source ip", + [NPC_DIP_IPV6] = "ipv6 destination ip", + [NPC_SPORT_TCP] = "tcp source port", + [NPC_DPORT_TCP] = "tcp destination port", + [NPC_SPORT_UDP] = "udp source port", + [NPC_DPORT_UDP] = "udp destination port", + [NPC_SPORT_SCTP] = "sctp source port", + [NPC_DPORT_SCTP] = "sctp destination port", + [NPC_UNKNOWN] = "unknown", +}; + +const char *npc_get_field_name(u8 hdr) +{ + if (hdr >= ARRAY_SIZE(npc_flow_names)) + return npc_flow_names[NPC_UNKNOWN]; + + return npc_flow_names[hdr]; +} + +/* Compute keyword masks and figure out the number of keywords a field + * spans in the key. + */ +static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type, + u8 nr_bits, int start_kwi, int offset, u8 intf) +{ + struct npc_key_field *field = &mcam->rx_key_fields[type]; + u8 bits_in_kw; + int max_kwi; + + if (mcam->banks_per_entry == 1) + max_kwi = 1; /* NPC_MCAM_KEY_X1 */ + else if (mcam->banks_per_entry == 2) + max_kwi = 3; /* NPC_MCAM_KEY_X2 */ + else + max_kwi = 6; /* NPC_MCAM_KEY_X4 */ + + if (is_npc_intf_tx(intf)) + field = &mcam->tx_key_fields[type]; + + if (offset + nr_bits <= 64) { + /* one KW only */ + if (start_kwi > max_kwi) + return; + field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0) + << offset; + field->nr_kws = 1; + } else if (offset + nr_bits > 64 && + offset + nr_bits <= 128) { + /* two KWs */ + if (start_kwi + 1 > max_kwi) + return; + /* first KW mask */ + bits_in_kw = 64 - offset; + field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) + << offset; + /* second KW mask i.e. mask for rest of bits */ + bits_in_kw = nr_bits + offset - 64; + field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0); + field->nr_kws = 2; + } else { + /* three KWs */ + if (start_kwi + 2 > max_kwi) + return; + /* first KW mask */ + bits_in_kw = 64 - offset; + field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0) + << offset; + /* second KW mask */ + field->kw_mask[start_kwi + 1] = ~0ULL; + /* third KW mask i.e. 
mask for rest of bits */ + bits_in_kw = nr_bits + offset - 128; + field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0); + field->nr_kws = 3; + } +} + +/* Helper function to figure out whether field exists in the key */ +static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct npc_key_field *input; + + input = &mcam->rx_key_fields[type]; + if (is_npc_intf_tx(intf)) + input = &mcam->tx_key_fields[type]; + + return input->nr_kws > 0; +} + +static bool npc_is_same(struct npc_key_field *input, + struct npc_key_field *field) +{ + int ret; + + ret = memcmp(&input->layer_mdata, &field->layer_mdata, + sizeof(struct npc_layer_mdata)); + return ret == 0; +} + +static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type, + u64 cfg, u8 lid, u8 lt, u8 intf) +{ + struct npc_key_field *input = &mcam->rx_key_fields[type]; + + if (is_npc_intf_tx(intf)) + input = &mcam->tx_key_fields[type]; + + input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); + input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg); + input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1; + input->layer_mdata.ltype = lt; + input->layer_mdata.lid = lid; +} + +static bool npc_check_overlap_fields(struct npc_key_field *input1, + struct npc_key_field *input2) +{ + int kwi; + + /* Fields with same layer id and different ltypes are mutually + * exclusive, hence they are allowed to overlap + */ + if (input1->layer_mdata.lid == input2->layer_mdata.lid && + input1->layer_mdata.ltype != input2->layer_mdata.ltype) + return false; + + for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) { + if (input1->kw_mask[kwi] & input2->kw_mask[kwi]) + return true; + } + + return false; +} + +/* Helper function to check whether given field overlaps with any other fields + * in the key. Due to limitations on key size and the key extraction profile in + * use, higher layers can overwrite lower layer's header fields. Hence overlap + * needs to be checked. + */ +static bool npc_check_overlap(struct rvu *rvu, int blkaddr, + enum key_fields type, u8 start_lid, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct npc_key_field *dummy, *input; + int start_kwi, offset; + u8 nr_bits, lid, lt, ld; + u64 cfg; + + dummy = &mcam->rx_key_fields[NPC_UNKNOWN]; + input = &mcam->rx_key_fields[type]; + + if (is_npc_intf_tx(intf)) { + dummy = &mcam->tx_key_fields[NPC_UNKNOWN]; + input = &mcam->tx_key_fields[type]; + } + + for (lid = start_lid; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_INTFX_LIDX_LTX_LDX_CFG + (intf, lid, lt, ld)); + if (!FIELD_GET(NPC_LDATA_EN, cfg)) + continue; + memset(dummy, 0, sizeof(struct npc_key_field)); + npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg, + lid, lt, intf); + /* exclude input */ + if (npc_is_same(input, dummy)) + continue; + start_kwi = dummy->layer_mdata.key / 8; + offset = (dummy->layer_mdata.key * 8) % 64; + nr_bits = dummy->layer_mdata.len * 8; + /* form KW masks */ + npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits, + start_kwi, offset, intf); + /* check whether any input field bits fall in any + * other field's bits. 
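 + * For example, if the profile placed the IPv4 TOS byte and another + * field into the same bits of KW1, a match on one would clobber the + * other; such an overlap is reported to the caller below.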
+ */ + if (npc_check_overlap_fields(dummy, input)) + return true; + } + } + } + + return false; +} + +static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, + u8 intf) +{ + if (!npc_is_field_present(rvu, type, intf) || + npc_check_overlap(rvu, blkaddr, type, 0, intf)) + return -EOPNOTSUPP; + return 0; +} + +static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, + u8 key_nibble, u8 intf) +{ + u8 offset = (key_nibble * 4) % 64; /* offset within key word */ + u8 kwi = (key_nibble * 4) / 64; /* which word in key */ + u8 nr_bits = 4; /* bits in a nibble */ + u8 type; + + switch (bit_number) { + case 0 ... 2: + type = NPC_CHAN; + break; + case 3: + type = NPC_ERRLEV; + break; + case 4 ... 5: + type = NPC_ERRCODE; + break; + case 6: + type = NPC_LXMB; + break; + /* check for LTYPE only as of now */ + case 9: + type = NPC_LA; + break; + case 12: + type = NPC_LB; + break; + case 15: + type = NPC_LC; + break; + case 18: + type = NPC_LD; + break; + case 21: + type = NPC_LE; + break; + case 24: + type = NPC_LF; + break; + case 27: + type = NPC_LG; + break; + case 30: + type = NPC_LH; + break; + default: + return; + } + npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); +} + +static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct npc_key_field *key_fields; + /* Ether type can come from three layers + * (ethernet, single tagged, double tagged) + */ + struct npc_key_field *etype_ether; + struct npc_key_field *etype_tag1; + struct npc_key_field *etype_tag2; + /* Outer VLAN TCI can come from two layers + * (single tagged, double tagged) + */ + struct npc_key_field *vlan_tag1; + struct npc_key_field *vlan_tag2; + u64 *features; + u8 start_lid; + int i; + + key_fields = mcam->rx_key_fields; + features = &mcam->rx_features; + + if (is_npc_intf_tx(intf)) { + key_fields = mcam->tx_key_fields; + features = &mcam->tx_features; + } + + /* Handle header fields which can come from multiple layers like + * etype, outer vlan tci. These fields should have the same position in + * the key; otherwise, installing a mcam rule needs more than one entry, + * which complicates mcam space management. 
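 + * For example, the Ethertype can be extracted from LA (untagged), + * LB tag1 (CTAG) or LB tag2 (STAG); it is advertised as a single + * NPC_ETYPE match key only when every extracting layer places it at + * the same key offset, which is verified below by comparing the + * per-layer kw_masks.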
+ */ + etype_ether = &key_fields[NPC_ETYPE_ETHER]; + etype_tag1 = &key_fields[NPC_ETYPE_TAG1]; + etype_tag2 = &key_fields[NPC_ETYPE_TAG2]; + vlan_tag1 = &key_fields[NPC_VLAN_TAG1]; + vlan_tag2 = &key_fields[NPC_VLAN_TAG2]; + + /* if key profile programmed does not extract Ethertype at all */ + if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) + goto vlan_tci; + + /* if key profile programmed extracts Ethertype from one layer */ + if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) + key_fields[NPC_ETYPE] = *etype_ether; + if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws) + key_fields[NPC_ETYPE] = *etype_tag1; + if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws) + key_fields[NPC_ETYPE] = *etype_tag2; + + /* if key profile programmed extracts Ethertype from multiple layers */ + if (etype_ether->nr_kws && etype_tag1->nr_kws) { + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) + goto vlan_tci; + } + key_fields[NPC_ETYPE] = *etype_tag1; + } + if (etype_ether->nr_kws && etype_tag2->nr_kws) { + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) + goto vlan_tci; + } + key_fields[NPC_ETYPE] = *etype_tag2; + } + if (etype_tag1->nr_kws && etype_tag2->nr_kws) { + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) + goto vlan_tci; + } + key_fields[NPC_ETYPE] = *etype_tag2; + } + + /* check none of higher layers overwrite Ethertype */ + start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1; + if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) + goto vlan_tci; + *features |= BIT_ULL(NPC_ETYPE); +vlan_tci: + /* if key profile does not extract outer vlan tci at all */ + if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) + goto done; + + /* if key profile extracts outer vlan tci from one layer */ + if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws) + key_fields[NPC_OUTER_VID] = *vlan_tag1; + if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws) + key_fields[NPC_OUTER_VID] = *vlan_tag2; + + /* if key profile extracts outer vlan tci from multiple layers */ + if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) { + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) + goto done; + } + key_fields[NPC_OUTER_VID] = *vlan_tag2; + } + /* check none of higher layers overwrite outer vlan tci */ + start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1; + if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) + goto done; + *features |= BIT_ULL(NPC_OUTER_VID); +done: + return; +} + +static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid, + u8 lt, u64 cfg, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u8 hdr, key, nr_bytes, bit_offset; + u8 la_ltype, la_start; + /* starting KW index and starting bit position */ + int start_kwi, offset; + + nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1; + hdr = FIELD_GET(NPC_HDR_OFFSET, cfg); + key = FIELD_GET(NPC_KEY_OFFSET, cfg); + start_kwi = key / 8; + offset = (key * 8) % 64; + + /* For Tx, Layer A has NIX_INST_HDR_S(8 bytes) preceding + * ethernet header. 
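 + * Hence la_start below is 8 for Tx and 0 for Rx, i.e. the Tx DMAC is + * scanned at LA byte offset 8 while the Rx DMAC sits at offset 0.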
+ */ + if (is_npc_intf_tx(intf)) { + la_ltype = NPC_LT_LA_IH_NIX_ETHER; + la_start = 8; + } else { + la_ltype = NPC_LT_LA_ETHER; + la_start = 0; + } + +#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \ +do { \ + if (lid == (hlid) && lt == (hlt)) { \ + if ((hstart) >= hdr && \ + ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \ + bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \ + npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \ + npc_set_kw_masks(mcam, (name), (hlen) * 8, \ + start_kwi, offset + bit_offset, intf);\ + } \ + } \ +} while (0) + + /* List LID, LTYPE, start offset from layer and length(in bytes) of + * packet header fields below. + * Example: Source IP is 4 bytes and starts at 12th byte of IP header + */ + NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4); + NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4); + NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16); + NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16); + NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2); + NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2); + NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2); + NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2); + NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2); + NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2); + NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2); + NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2); + NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); + NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2); + NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2); + NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6); + NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6); + /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */ + NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2); +} + +static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u64 *features = &mcam->rx_features; + u64 tcp_udp_sctp; + int err, hdr; + + if (is_npc_intf_tx(intf)) + features = &mcam->tx_features; + + for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) { + err = npc_check_field(rvu, blkaddr, hdr, intf); + if (!err) + *features |= BIT_ULL(hdr); + } + + tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) | + BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) | + BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP); + + /* for tcp/udp/sctp corresponding layer type should be in the key */ + if (*features & tcp_udp_sctp) + if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) + *features &= ~tcp_udp_sctp; + + /* for vlan corresponding layer type should be in the key */ + if (*features & BIT_ULL(NPC_OUTER_VID)) + if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) + *features &= ~BIT_ULL(NPC_OUTER_VID); +} + +/* Scan key extraction profile and record how fields of our interest + * fill the key structure. Also verify Channel and DMAC exists in + * key and not overwritten by other header fields. + */ +static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u8 lid, lt, ld, bitnr; + u8 key_nibble = 0; + u64 cfg; + + /* Scan and note how parse result is going to be in key. + * A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from + * parse result in the key. The enabled nibbles from parse result + * will be concatenated in key. 
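 + * For example, with only nibbles 0-2 (channel) and 9 (LA ltype) + * enabled, the channel occupies KW0 bits [11:0] and the LA ltype + * nibble KW0 bits [15:12].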
+ */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf)); + cfg &= NPC_PARSE_NIBBLE; + for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) { + npc_scan_parse_result(mcam, bitnr, key_nibble, intf); + key_nibble++; + } + + /* Scan and note how layer data is going to be in key */ + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_INTFX_LIDX_LTX_LDX_CFG + (intf, lid, lt, ld)); + if (!FIELD_GET(NPC_LDATA_EN, cfg)) + continue; + npc_scan_ldata(rvu, blkaddr, lid, lt, cfg, + intf); + } + } + } + + return 0; +} + +static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr) +{ + int err; + + err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX); + if (err) + return err; + + err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX); + if (err) + return err; + + /* Channel is mandatory */ + if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) { + dev_err(rvu->dev, "Channel not present in Key\n"); + return -EINVAL; + } + /* check that none of the fields overwrite channel */ + if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) { + dev_err(rvu->dev, "Channel cannot be overwritten\n"); + return -EINVAL; + } + /* DMAC should be present in key for unicast filter to work */ + if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) { + dev_err(rvu->dev, "DMAC not present in Key\n"); + return -EINVAL; + } + /* check that none of the fields overwrite DMAC */ + if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) { + dev_err(rvu->dev, "DMAC cannot be overwritten\n"); + return -EINVAL; + } + + npc_set_features(rvu, blkaddr, NIX_INTF_TX); + npc_set_features(rvu, blkaddr, NIX_INTF_RX); + npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX); + npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX); + + return 0; +} + +int npc_flow_steering_init(struct rvu *rvu, int blkaddr) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + + INIT_LIST_HEAD(&mcam->mcam_rules); + + return npc_scan_verify_kex(rvu, blkaddr); +} + +static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u64 *mcam_features = &mcam->rx_features; + u64 unsupported; + u8 bit; + + if (is_npc_intf_tx(intf)) + mcam_features = &mcam->tx_features; + + unsupported = (*mcam_features ^ features) & ~(*mcam_features); + if (unsupported) { + dev_info(rvu->dev, "Unsupported flow(s):\n"); + for_each_set_bit(bit, (unsigned long *)&unsupported, 64) + dev_info(rvu->dev, "%s ", npc_get_field_name(bit)); + return -EOPNOTSUPP; + } + + return 0; +} + +/* npc_update_entry - Based on the masks generated during + * the key scanning, updates the given entry with value and + * masks for the field of interest. Maximum 16 bytes of a packet + * header can be extracted by HW hence lo and hi are sufficient. + * When field bytes are less than or equal to 8 then hi should be + * 0 for value and mask. + * + * If exact match of value is required then mask should be all 1's. + * If any bits in mask are 0 then corresponding bits in value are + * don't care. 
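 + * For example, an exact match on TCP dport 80 uses val_lo = 80, + * val_hi = 0, mask_lo = 0xffff and mask_hi = 0; the value and mask are + * then shifted into the right keywords using the kw_masks computed + * during the key scan.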
+ */ +static void npc_update_entry(struct rvu *rvu, enum key_fields type, + struct mcam_entry *entry, u64 val_lo, + u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct mcam_entry dummy = { {0} }; + struct npc_key_field *field; + u64 kw1, kw2, kw3; + u8 shift; + int i; + + field = &mcam->rx_key_fields[type]; + if (is_npc_intf_tx(intf)) + field = &mcam->tx_key_fields[type]; + + if (!field->nr_kws) + return; + + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (!field->kw_mask[i]) + continue; + /* place key value in kw[x] */ + shift = __ffs64(field->kw_mask[i]); + /* update entry value */ + kw1 = (val_lo << shift) & field->kw_mask[i]; + dummy.kw[i] = kw1; + /* update entry mask */ + kw1 = (mask_lo << shift) & field->kw_mask[i]; + dummy.kw_mask[i] = kw1; + + if (field->nr_kws == 1) + break; + /* place remaining bits of key value in kw[x + 1] */ + if (field->nr_kws == 2) { + /* update entry value */ + kw2 = shift ? val_lo >> (64 - shift) : 0; + kw2 |= (val_hi << shift); + kw2 &= field->kw_mask[i + 1]; + dummy.kw[i + 1] = kw2; + /* update entry mask */ + kw2 = shift ? mask_lo >> (64 - shift) : 0; + kw2 |= (mask_hi << shift); + kw2 &= field->kw_mask[i + 1]; + dummy.kw_mask[i + 1] = kw2; + break; + } + /* place remaining bits of key value in kw[x + 1], kw[x + 2] */ + if (field->nr_kws == 3) { + /* update entry value */ + kw2 = shift ? val_lo >> (64 - shift) : 0; + kw2 |= (val_hi << shift); + kw2 &= field->kw_mask[i + 1]; + kw3 = shift ? val_hi >> (64 - shift) : 0; + kw3 &= field->kw_mask[i + 2]; + dummy.kw[i + 1] = kw2; + dummy.kw[i + 2] = kw3; + /* update entry mask */ + kw2 = shift ? mask_lo >> (64 - shift) : 0; + kw2 |= (mask_hi << shift); + kw2 &= field->kw_mask[i + 1]; + kw3 = shift ? mask_hi >> (64 - shift) : 0; + kw3 &= field->kw_mask[i + 2]; + dummy.kw_mask[i + 1] = kw2; + dummy.kw_mask[i + 2] = kw3; + break; + } + } + /* dummy is ready with values and masks for given key + * field; now clear and update the input entry with those + */ + for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) { + if (!field->kw_mask[i]) + continue; + entry->kw[i] &= ~field->kw_mask[i]; + entry->kw_mask[i] &= ~field->kw_mask[i]; + + entry->kw[i] |= dummy.kw[i]; + entry->kw_mask[i] |= dummy.kw_mask[i]; + } +} + +#define IPV6_WORDS 4 + +static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, + u64 features, struct flow_msg *pkt, + struct flow_msg *mask, + struct rvu_npc_mcam_rule *output, u8 intf) +{ + u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS]; + u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS]; + struct flow_msg *opkt = &output->packet; + struct flow_msg *omask = &output->mask; + u64 mask_lo, mask_hi; + u64 val_lo, val_hi; + + /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet + * values to be programmed in MCAM should be as below: + * val_high: 0xfe80000000000000 + * val_low: 0x2c6863fffe5e2d0a + */ + if (features & BIT_ULL(NPC_SIP_IPV6)) { + be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS); + be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS); + + mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1]; + mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3]; + val_hi = (u64)src_ip[0] << 32 | src_ip[1]; + val_lo = (u64)src_ip[2] << 32 | src_ip[3]; + + npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi, + mask_lo, mask_hi, intf); + memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src)); + memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src)); + } + if (features & BIT_ULL(NPC_DIP_IPV6)) { + be32_to_cpu_array(dst_ip_mask, mask->ip6dst, 
IPV6_WORDS); + be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS); + + mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1]; + mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3]; + val_hi = (u64)dst_ip[0] << 32 | dst_ip[1]; + val_lo = (u64)dst_ip[2] << 32 | dst_ip[3]; + + npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi, + mask_lo, mask_hi, intf); + memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst)); + memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst)); + } +} + +static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, + u64 features, struct flow_msg *pkt, + struct flow_msg *mask, + struct rvu_npc_mcam_rule *output, u8 intf) +{ + u64 dmac_mask = ether_addr_to_u64(mask->dmac); + u64 smac_mask = ether_addr_to_u64(mask->smac); + u64 dmac_val = ether_addr_to_u64(pkt->dmac); + u64 smac_val = ether_addr_to_u64(pkt->smac); + struct flow_msg *opkt = &output->packet; + struct flow_msg *omask = &output->mask; + + if (!features) + return; + + /* For tcp/udp/sctp LTYPE should be present in entry */ + if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP))) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP, + 0, ~0ULL, 0, intf); + if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP))) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP, + 0, ~0ULL, 0, intf); + if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP))) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, + 0, ~0ULL, 0, intf); + + if (features & BIT_ULL(NPC_OUTER_VID)) + npc_update_entry(rvu, NPC_LB, entry, + NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, + NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); + +#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \ +do { \ + if (features & BIT_ULL((field))) { \ + npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \ + (mask_lo), (mask_hi), intf); \ + memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \ + memcpy(&omask->member, &mask->member, sizeof(mask->member)); \ + } \ +} while (0) + + NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0); + NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0); + NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0, + ntohs(mask->etype), 0); + NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0, + ntohl(mask->ip4src), 0); + NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0, + ntohl(mask->ip4dst), 0); + NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0, + ntohs(mask->sport), 0); + NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0, + ntohs(mask->sport), 0); + NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0, + ntohs(mask->dport), 0); + NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0, + ntohs(mask->dport), 0); + NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0, + ntohs(mask->sport), 0); + NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0, + ntohs(mask->dport), 0); + + NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0, + ntohs(mask->vlan_tci), 0); + + npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); +} + +static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, + u16 entry) +{ + struct rvu_npc_mcam_rule *iter; + + mutex_lock(&mcam->lock); + list_for_each_entry(iter, &mcam->mcam_rules, list) { + if (iter->entry == entry) { + mutex_unlock(&mcam->lock); + return iter; + } + } + mutex_unlock(&mcam->lock); + + return NULL; +} + +static void rvu_mcam_add_rule(struct npc_mcam *mcam, + struct rvu_npc_mcam_rule *rule) +{ + struct list_head 
*head = &mcam->mcam_rules; + struct rvu_npc_mcam_rule *iter; + + mutex_lock(&mcam->lock); + list_for_each_entry(iter, &mcam->mcam_rules, list) { + if (iter->entry > rule->entry) + break; + head = &iter->list; + } + + list_add(&rule->list, head); + mutex_unlock(&mcam->lock); +} + +static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule) +{ + struct npc_mcam_oper_counter_req free_req = { 0 }; + struct msg_rsp free_rsp; + + if (!rule->has_cntr) + return; + + free_req.hdr.pcifunc = pcifunc; + free_req.cntr = rule->cntr; + + rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp); + rule->has_cntr = false; +} + +static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule, + struct npc_install_flow_rsp *rsp) +{ + struct npc_mcam_alloc_counter_req cntr_req = { 0 }; + struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; + int err; + + cntr_req.hdr.pcifunc = pcifunc; + cntr_req.contig = true; + cntr_req.count = 1; + + /* we try to allocate a counter to track the stats of this + * rule. If counter could not be allocated then proceed + * without counter because counters are fewer than entries. + */ + err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, + &cntr_rsp); + if (!err && cntr_rsp.count) { + rule->cntr = cntr_rsp.cntr; + rule->has_cntr = true; + rsp->counter = rule->cntr; + } else { + rsp->counter = err; + } +} + +static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct mcam_entry *entry, + struct npc_install_flow_req *req, u16 target) +{ + struct nix_rx_action action; + + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, + ~0ULL, 0, NIX_INTF_RX); + + *(u64 *)&action = 0x00; + action.pf_func = target; + action.op = req->op; + action.index = req->index; + action.match_id = req->match_id; + action.flow_key_alg = req->flow_key_alg; + + if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule) + action = pfvf->def_ucast_rule->rx_action; + + entry->action = *(u64 *)&action; + + /* VTAG0 starts at 0th byte of LID_B. + * VTAG1 starts at 4th byte of LID_B. + */ + entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) | + FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) | + FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) | + FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) | + FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) | + FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) | + FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) | + FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4); +} + +static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, + struct mcam_entry *entry, + struct npc_install_flow_req *req, u16 target) +{ + struct nix_tx_action action; + + npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), + 0, ~0ULL, 0, NIX_INTF_TX); + + *(u64 *)&action = 0x00; + action.op = req->op; + action.index = req->index; + action.match_id = req->match_id; + + entry->action = *(u64 *)&action; + + /* For Tx, VTAG0 is inserted at byte 20 of LID_A and VTAG1 at + * byte 24 (the relptr values below). 
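 + * Byte 20 = 8 byte NIX_INST_HDR_S + 12 bytes of DMAC and SMAC, + * so the tag is inserted right after the MAC addresses.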
+ */ + entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) | + FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) | + FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) | + FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) | + FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) | + FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) | + FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) | + FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24); +} + +static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, + int nixlf, struct rvu_pfvf *pfvf, + struct npc_install_flow_req *req, + struct npc_install_flow_rsp *rsp, bool enable, + bool pf_set_vfs_mac) +{ + struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule; + u64 features, installed_features, missing_features = 0; + struct npc_mcam_write_entry_req write_req = { 0 }; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule dummy = { 0 }; + struct rvu_npc_mcam_rule *rule; + bool new = false, msg_from_vf; + u16 owner = req->hdr.pcifunc; + struct msg_rsp write_rsp; + struct mcam_entry *entry; + int entry_index, err; + + msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK); + + installed_features = req->features; + features = req->features; + entry = &write_req.entry_data; + entry_index = req->entry; + + npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, + req->intf); + + if (is_npc_intf_rx(req->intf)) + npc_update_rx_entry(rvu, pfvf, entry, req, target); + else + npc_update_tx_entry(rvu, pfvf, entry, req, target); + + /* Default unicast rules do not exist for TX */ + if (is_npc_intf_tx(req->intf)) + goto find_rule; + + if (def_ucast_rule) + missing_features = (def_ucast_rule->features ^ features) & + def_ucast_rule->features; + + if (req->default_rule && req->append) { + /* add to default rule */ + if (missing_features) + npc_update_flow(rvu, entry, missing_features, + &def_ucast_rule->packet, + &def_ucast_rule->mask, + &dummy, req->intf); + enable = rvu_npc_write_default_rule(rvu, blkaddr, + nixlf, target, + pfvf->nix_rx_intf, entry, + &entry_index); + installed_features = req->features | missing_features; + } else if (req->default_rule && !req->append) { + /* overwrite default rule */ + enable = rvu_npc_write_default_rule(rvu, blkaddr, + nixlf, target, + pfvf->nix_rx_intf, entry, + &entry_index); + } else if (msg_from_vf) { + /* normal rule - include default rule also to it for VF */ + npc_update_flow(rvu, entry, missing_features, + &def_ucast_rule->packet, &def_ucast_rule->mask, + &dummy, req->intf); + installed_features = req->features | missing_features; + } + +find_rule: + rule = rvu_mcam_find_rule(mcam, entry_index); + if (!rule) { + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + new = true; + } + /* no counter for default rule */ + if (req->default_rule) + goto update_rule; + + /* allocate new counter if rule has no counter */ + if (req->set_cntr && !rule->has_cntr) + rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp); + + /* if user wants to delete an existing counter for a rule then + * free the counter + */ + if (!req->set_cntr && rule->has_cntr) + rvu_mcam_remove_counter_from_rule(rvu, owner, rule); + + write_req.hdr.pcifunc = owner; + write_req.entry = req->entry; + write_req.intf = req->intf; + write_req.enable_entry = (u8)enable; + /* if counter is available then clear and use it */ + if (req->set_cntr && rule->has_cntr) { + rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00); + write_req.set_cntr = 1; + write_req.cntr = rule->cntr; + } + + err = rvu_mbox_handler_npc_mcam_write_entry(rvu, 
&write_req, + &write_rsp); + if (err) { + rvu_mcam_remove_counter_from_rule(rvu, owner, rule); + if (new) + kfree(rule); + return err; + } +update_rule: + memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet)); + memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask)); + rule->entry = entry_index; + memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action)); + if (is_npc_intf_tx(req->intf)) + memcpy(&rule->tx_action, &entry->action, + sizeof(struct nix_tx_action)); + rule->vtag_action = entry->vtag_action; + rule->features = installed_features; + rule->default_rule = req->default_rule; + rule->owner = owner; + rule->enable = enable; + if (is_npc_intf_tx(req->intf)) + rule->intf = pfvf->nix_tx_intf; + else + rule->intf = pfvf->nix_rx_intf; + + if (new) + rvu_mcam_add_rule(mcam, rule); + if (req->default_rule) + pfvf->def_ucast_rule = rule; + + /* VF's MAC address is being changed via PF */ + if (pf_set_vfs_mac) { + ether_addr_copy(pfvf->default_mac, req->packet.dmac); + ether_addr_copy(pfvf->mac_addr, req->packet.dmac); + } + + if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7) + rule->vfvlan_cfg = true; + + return 0; +} + +int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, + struct npc_install_flow_req *req, + struct npc_install_flow_rsp *rsp) +{ + bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + bool pf_set_vfs_mac = false; + bool enable = true; + u16 target; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -ENODEV; + } + + if (!is_npc_interface_valid(rvu, req->intf)) + return -EINVAL; + + if (from_vf && req->default_rule) + return NPC_MCAM_PERM_DENIED; + + /* Each PF/VF info is maintained in struct rvu_pfvf. + * rvu_pfvf for the target PF/VF needs to be retrieved + * hence modify pcifunc accordingly. + */ + + /* AF installing for a PF/VF */ + if (!req->hdr.pcifunc) + target = req->vf; + /* PF installing for its VF */ + else if (!from_vf && req->vf) { + target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; + pf_set_vfs_mac = req->default_rule && + (req->features & BIT_ULL(NPC_DMAC)); + } + /* msg received from PF/VF */ + else + target = req->hdr.pcifunc; + + if (npc_check_unsupported_flows(rvu, req->features, req->intf)) + return -EOPNOTSUPP; + + if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, target); + + /* PF installing for its VF */ + if (req->hdr.pcifunc && !from_vf && req->vf) + pfvf->pf_set_vf_cfg = 1; + + /* update req destination mac addr */ + if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) && + is_zero_ether_addr(req->packet.dmac)) { + ether_addr_copy(req->packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req->mask.dmac); + } + + err = nix_get_nixlf(rvu, target, &nixlf, NULL); + + /* If interface is uninitialized then do not enable entry */ + if (err || (!req->default_rule && !pfvf->def_ucast_rule)) + enable = false; + + /* Packets reaching NPC in Tx path implies that a + * NIXLF is properly setup and transmitting. + * Hence rules can be enabled for Tx. + */ + if (is_npc_intf_tx(req->intf)) + enable = true; + + /* Do not allow requests from uninitialized VFs */ + if (from_vf && !enable) + return -EINVAL; + + /* If message is from VF then its flow should not overlap with + * reserved unicast flow. 
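 + * For example, if the VF's default unicast entry already matches on + * DMAC, the VF may not install another DMAC-based flow, since both + * entries would contend for the same packets.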
+ */ + if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) && + pfvf->def_ucast_rule->features & req->features) + return -EINVAL; + + return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp, + enable, pf_set_vfs_mac); +} + +static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, + u16 pcifunc) +{ + struct npc_mcam_ena_dis_entry_req dis_req = { 0 }; + struct msg_rsp dis_rsp; + + if (rule->default_rule) + return 0; + + if (rule->has_cntr) + rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule); + + dis_req.hdr.pcifunc = pcifunc; + dis_req.entry = rule->entry; + + list_del(&rule->list); + kfree(rule); + + return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp); +} + +int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu, + struct npc_delete_flow_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *iter, *tmp; + u16 pcifunc = req->hdr.pcifunc; + struct list_head del_list; + + INIT_LIST_HEAD(&del_list); + + mutex_lock(&mcam->lock); + list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) { + if (iter->owner == pcifunc) { + /* All rules */ + if (req->all) { + list_move_tail(&iter->list, &del_list); + /* Range of rules */ + } else if (req->end && iter->entry >= req->start && + iter->entry <= req->end) { + list_move_tail(&iter->list, &del_list); + /* single rule */ + } else if (req->entry == iter->entry) { + list_move_tail(&iter->list, &del_list); + break; + } + } + } + mutex_unlock(&mcam->lock); + + list_for_each_entry_safe(iter, tmp, &del_list, list) { + u16 entry = iter->entry; + + /* clear the mcam entry target pcifunc */ + mcam->entry2target_pffunc[entry] = 0x0; + if (npc_delete_flow(rvu, iter, pcifunc)) + dev_err(rvu->dev, "rule deletion failed for entry:%u", + entry); + } + + return 0; +} + +static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr, + struct rvu_npc_mcam_rule *rule, + struct rvu_pfvf *pfvf) +{ + struct npc_mcam_write_entry_req write_req = { 0 }; + struct mcam_entry *entry = &write_req.entry_data; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct msg_rsp rsp; + u8 intf, enable; + int err; + + ether_addr_copy(rule->packet.dmac, pfvf->mac_addr); + + npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry, + entry, &intf, &enable); + + npc_update_entry(rvu, NPC_DMAC, entry, + ether_addr_to_u64(pfvf->mac_addr), 0, + 0xffffffffffffull, 0, intf); + + write_req.hdr.pcifunc = rule->owner; + write_req.entry = rule->entry; + + mutex_unlock(&mcam->lock); + err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp); + mutex_lock(&mcam->lock); + + return err; +} + +void npc_mcam_enable_flows(struct rvu *rvu, u16 target) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target); + struct rvu_npc_mcam_rule *def_ucast_rule; + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_npc_mcam_rule *rule; + int blkaddr, bank, index; + u64 def_action; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + def_ucast_rule = pfvf->def_ucast_rule; + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (is_npc_intf_rx(rule->intf) && + rule->rx_action.pf_func == target && !rule->enable) { + if (rule->default_rule) { + npc_enable_mcam_entry(rvu, mcam, blkaddr, + rule->entry, true); + rule->enable = true; + continue; + } + + if (rule->vfvlan_cfg) + npc_update_dmac_value(rvu, blkaddr, rule, pfvf); + + if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) { + if (!def_ucast_rule) + continue; + /* Use default unicast entry 
action */ + rule->rx_action = def_ucast_rule->rx_action; + def_action = *(u64 *)&def_ucast_rule->rx_action; + bank = npc_get_bank(mcam, rule->entry); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION + (rule->entry, bank), def_action); + } + + npc_enable_mcam_entry(rvu, mcam, blkaddr, + rule->entry, true); + rule->enable = true; + } + } + + /* Enable MCAM entries installed by PF with target as VF pcifunc */ + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2target_pffunc[index] == target) + npc_enable_mcam_entry(rvu, mcam, blkaddr, + index, true); + } + mutex_unlock(&mcam->lock); +} + +void npc_mcam_disable_flows(struct rvu *rvu, u16 target) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + mutex_lock(&mcam->lock); + /* Disable MCAM entries installed by PF with target as VF pcifunc */ + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2target_pffunc[index] == target) + npc_enable_mcam_entry(rvu, mcam, blkaddr, + index, false); + } + mutex_unlock(&mcam->lock); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index 9d7c135c7965..e266f0c49559 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -35,7 +35,7 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {0x1200, 0x12E0} } }, {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, {0x1610, 0x1618} } }, - {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } }, + {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 7ca599b973c0..0fb2aa909a23 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -54,20 +54,20 @@ #define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16) #define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16) #define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16) -#define RVU_PRIV_PFX_NIX0_CFG (0x8000300) +#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3) #define RVU_PRIV_PFX_NPA_CFG (0x8000310) #define RVU_PRIV_PFX_SSO_CFG (0x8000320) #define RVU_PRIV_PFX_SSOW_CFG (0x8000330) #define RVU_PRIV_PFX_TIM_CFG (0x8000340) -#define RVU_PRIV_PFX_CPT0_CFG (0x8000350) +#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3) #define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3) #define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16) -#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300) +#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3) #define RVU_PRIV_HWVFX_NPA_CFG (0x8001310) #define RVU_PRIV_HWVFX_SSO_CFG (0x8001320) #define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330) #define RVU_PRIV_HWVFX_TIM_CFG (0x8001340) -#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350) +#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3) /* RVU PF registers */ #define RVU_PF_VFX_PFVF_MBOX0 (0x00000) @@ -429,12 +429,63 @@ #define TIM_AF_LF_RST (0x20) /* CPT */ -#define CPT_AF_CONSTANTS0 (0x0000) -#define CPT_PRIV_LFX_CFG (0x41000) -#define CPT_PRIV_LFX_INT_CFG (0x43000) -#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) -#define CPT_AF_LF_RST (0x44000) -#define CPT_AF_BLK_RST (0x46000) +#define CPT_AF_CONSTANTS0 (0x0000) +#define CPT_AF_CONSTANTS1 (0x1000) 
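+/* In the indexed CPT registers below, (a) selects the FLT/engine/LF/group instance at an 8-byte stride, e.g. CPT_AF_EXEX_CTL(3) = 0x20000 | (3 << 3) = 0x20018 */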
+#define CPT_AF_DIAG (0x3000) +#define CPT_AF_ECO (0x4000) +#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3) +#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3) +#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3) +#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3) +#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3) +#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3) +#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3) +#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3) +#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3) +#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3) +#define CPT_AF_EXE_ERR_INFO (0x14000) +#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3) +#define CPT_AF_INST_REQ_PC (0x17000) +#define CPT_AF_INST_LATENCY_PC (0x18000) +#define CPT_AF_RD_REQ_PC (0x19000) +#define CPT_AF_RD_LATENCY_PC (0x1a000) +#define CPT_AF_RD_UC_PC (0x1b000) +#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000) +#define CPT_AF_EXE_DBG_CTL (0x1d000) +#define CPT_AF_EXE_DBG_DATA (0x1e000) +#define CPT_AF_EXE_REQ_TIMER (0x1f000) +#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3) +#define CPT_AF_EXE_PERF_CTL (0x21000) +#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3) +#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000) +#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3) +#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3) +#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3) +#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3) +#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3) +#define CPT_AF_CPTCLK_CNT (0x2a000) +#define CPT_AF_PF_FUNC (0x2b000) +#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3) +#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3) +#define CPT_AF_CTL (0x2e000ull) +#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3) +#define CPT_PRIV_LFX_CFG (0x41000) +#define CPT_PRIV_AF_INT_CFG (0x42000) +#define CPT_PRIV_LFX_INT_CFG (0x43000) +#define CPT_AF_LF_RST (0x44000) +#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000) +#define CPT_AF_BLK_RST (0x46000) +#define CPT_AF_RVU_INT (0x47000) +#define CPT_AF_RVU_INT_W1S (0x47008) +#define CPT_AF_RVU_INT_ENA_W1S (0x47010) +#define CPT_AF_RVU_INT_ENA_W1C (0x47018) +#define CPT_AF_RAS_INT (0x47020) +#define CPT_AF_RAS_INT_W1S (0x47028) +#define CPT_AF_RAS_INT_ENA_W1S (0x47030) +#define CPT_AF_RAS_INT_ENA_W1C (0x47038) + +#define CPT_AF_LF_CTL2_SHIFT 3 +#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32 #define NPC_AF_BLK_RST (0x00040) @@ -446,6 +497,8 @@ #define NPC_AF_BLK_RST (0x00040) #define NPC_AF_MCAM_SCRUB_CTL (0x000a0) #define NPC_AF_KCAM_SCRUB_CTL (0x000b0) +#define NPC_AF_CONST2 (0x00100) +#define NPC_AF_CONST3 (0x00110) #define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3) #define NPC_AF_PCK_CFG (0x00600) #define NPC_AF_PCK_DEF_OL2 (0x00610) @@ -469,20 +522,7 @@ (0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3) #define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \ (0x980000 | (a) << 16 | (b) << 12 | (c) << 3) -#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \ - (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3) -#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \ - (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3) -#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \ - (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3) -#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4) -#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \ - (0x1880000 | (a) << 8 | (b) << 4) -#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8) #define 
NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8) -#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4) -#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \ - (0x1900008 | (a) << 8 | (b) << 4) #define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4) #define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4) #define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4) @@ -499,6 +539,70 @@ #define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4) #define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4) +#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \ + u64 offset; \ + \ + offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \ + offset; }) + +#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \ + u64 offset; \ + \ + offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \ + offset; }) + +#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \ + u64 offset; \ + \ + offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \ + offset; }) + +#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \ + u64 offset; \ + \ + offset = (0x1800000ull | (a) << 8 | (b) << 4); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000038ull | (a) << 8 | (b) << 22); \ + offset; }) + +#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \ + u64 offset; \ + \ + offset = (0x1900000ull | (a) << 8 | (b) << 4); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000040ull | (a) << 8 | (b) << 22); \ + offset; }) \ + +#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \ + u64 offset; \ + \ + offset = (0x1900008ull | (a) << 8 | (b) << 4); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000048ull | (a) << 8 | (b) << 22); \ + offset; }) \ + +#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \ + u64 offset; \ + \ + offset = (0x1880000ull | (a) << 8 | (b) << 4); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000050ull | (a) << 8 | (b) << 22); \ + offset; }) \ + +#define NPC_AF_MATCH_STATX(a) ({ \ + u64 offset; \ + \ + offset = (0x1880008ull | (a) << 8); \ + if (rvu->hw->npc_ext_set) \ + offset = (0x8000078ull | (a) << 8); \ + offset; }) \ + /* NDC */ #define NDC_AF_CONST (0x00000) #define NDC_AF_CLK_EN (0x00020) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index a3ecb5de9000..e2153d47c373 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -14,6 +14,8 @@ /* RVU Block revision IDs */ #define RVU_BLK_RVUM_REVID 0x01 +#define RVU_MULTI_BLK_VER 0x7ULL + /* RVU Block Address Enumeration */ enum rvu_block_addr_e { BLKADDR_RVUM = 0x0ULL, @@ -31,7 +33,9 @@ enum rvu_block_addr_e { BLKADDR_NDC_NIX0_RX = 0xcULL, BLKADDR_NDC_NIX0_TX = 0xdULL, BLKADDR_NDC_NPA0 = 0xeULL, - BLK_COUNT = 0xfULL, + BLKADDR_NDC_NIX1_RX = 0x10ULL, + BLKADDR_NDC_NIX1_TX = 0x11ULL, + BLK_COUNT = 0x12ULL, }; /* RVU Block Type Enumeration */ @@ -60,6 +64,16 @@ enum rvu_af_int_vec_e { RVU_AF_INT_VEC_CNT = 0x5, }; +/* NPA Admin function Interrupt Vector Enumeration */ +enum npa_af_int_vec_e { + NPA_AF_INT_VEC_RVU = 0x0, + NPA_AF_INT_VEC_GEN = 0x1, + NPA_AF_INT_VEC_AQ_DONE = 0x2, + NPA_AF_INT_VEC_AF_ERR = 0x3, + NPA_AF_INT_VEC_POISON = 0x4, + NPA_AF_INT_VEC_CNT = 0x5, +}; + /** * RVU PF Interrupt Vector Enumeration */ @@ -100,6 +114,19 @@ enum 
npa_aq_instop { NPA_AQ_INSTOP_UNLOCK = 0x5, }; +/* ALLOC/FREE input queues Enumeration from coprocessors */ +enum npa_inpq { + NPA_INPQ_NIX0_RX = 0x0, + NPA_INPQ_NIX0_TX = 0x1, + NPA_INPQ_NIX1_RX = 0x2, + NPA_INPQ_NIX1_TX = 0x3, + NPA_INPQ_SSO = 0x4, + NPA_INPQ_TIM = 0x5, + NPA_INPQ_DPI = 0x6, + NPA_INPQ_AURA_OP = 0xe, + NPA_INPQ_INTERNAL_RSV = 0xf, +}; + /* NPA admin queue instruction structure */ struct npa_aq_inst_s { #if defined(__BIG_ENDIAN_BITFIELD) @@ -917,4 +944,15 @@ enum nix_vtag_size { VTAGSIZE_T4 = 0x0, VTAGSIZE_T8 = 0x1, }; + +enum nix_tx_vtag_op { + NOP = 0x0, + VTAG_INSERT = 0x1, + VTAG_REPLACE = 0x2, +}; + +/* NIX RX VTAG actions */ +#define VTAG_STRIP BIT_ULL(4) +#define VTAG_CAPTURE BIT_ULL(5) + #endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index b2c6385707c9..4193ae3bde6b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o + otx2_ptp.o otx2_flows.o octeontx2_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index d2581090f9a4..73fb94dd5fbc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -191,10 +191,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) + if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - else + /* update dmac field in vlan offload rule */ + if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + otx2_install_rxvlan_offload_flow(pfvf); + } else { return -EPERM; + } return 0; } @@ -355,7 +359,8 @@ int otx2_rss_init(struct otx2_nic *pfvf) rss->flowkey_cfg = rss->enable ? 
rss->flowkey_cfg : NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | - NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN; + NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN | + NIX_FLOW_KEY_TYPE_IPV4_PROTO; ret = otx2_set_flowkey_cfg(pfvf); if (ret) @@ -531,8 +536,10 @@ static int otx2_get_link(struct otx2_nic *pfvf) link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); } /* LBK channel */ - if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) - link = 12; + if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) { + map = pfvf->hw.tx_chan_base & 0x7FF; + link = pfvf->hw.cgx_links | ((map >> 8) & 0xF); + } return link; } @@ -1237,7 +1244,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) sq = &qset->sq[qidx]; sq->sqb_count = 0; - sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL); + sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); if (!sq->sqb_ptrs) return -ENOMEM; @@ -1503,6 +1510,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.tx_chan_base = rsp->tx_chan_base; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; + pfvf->hw.cgx_links = rsp->cgx_links; + pfvf->hw.lbk_links = rsp->lbk_links; } EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index d6253f2a414d..103430400a8a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -11,13 +11,16 @@ #ifndef OTX2_COMMON_H #define OTX2_COMMON_H +#include <linux/ethtool.h> #include <linux/pci.h> #include <linux/iommu.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> +#include <linux/soc/marvell/octeontx2/asm.h> #include <mbox.h> +#include <npc.h> #include "otx2_reg.h" #include "otx2_txrx.h" #include <rvu_trace.h> @@ -197,12 +200,17 @@ struct otx2_hw { struct otx2_drv_stats drv_stats; u64 cgx_rx_stats[CGX_RX_STATS_COUNT]; u64 cgx_tx_stats[CGX_TX_STATS_COUNT]; + u8 cgx_links; /* No. of CGX links present in HW */ + u8 lbk_links; /* No. 
of LBK links present in HW */ }; struct otx2_vf_config { struct otx2_nic *pf; struct delayed_work link_event_work; bool intf_down; /* interface was either configured or not */ + u8 mac[ETH_ALEN]; + u16 vlan; + int tx_vtag_idx; }; struct flr_work { @@ -226,6 +234,32 @@ struct otx2_ptp { #define OTX2_HW_TIMESTAMP_LEN 8 +struct otx2_mac_table { + u8 addr[ETH_ALEN]; + u16 mcam_entry; + bool inuse; +}; + +struct otx2_flow_config { + u16 entry[NPC_MAX_NONCONTIG_ENTRIES]; + u32 nr_flows; +#define OTX2_MAX_NTUPLE_FLOWS 32 +#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_MAX_VLAN_FLOWS 1 +#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \ + OTX2_MAX_UNICAST_FLOWS + \ + OTX2_MAX_VLAN_FLOWS) + u32 ntuple_offset; + u32 unicast_offset; + u32 rx_vlan_offset; + u32 vf_vlan_offset; +#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */ +#define OTX2_VF_VLAN_RX_INDEX 0 +#define OTX2_VF_VLAN_TX_INDEX 1 + u32 ntuple_max_flows; + struct list_head flow_list; +}; + struct otx2_nic { void __iomem *reg_base; struct net_device *netdev; @@ -236,6 +270,12 @@ struct otx2_nic { #define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0) #define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1) #define OTX2_FLAG_INTF_DOWN BIT_ULL(2) +#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3) +#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4) +#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5) +#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6) +#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7) +#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8) #define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9) #define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10) u64 flags; @@ -264,6 +304,7 @@ struct otx2_nic { struct refill_work *refill_wrk; struct workqueue_struct *otx2_wq; struct work_struct rx_mode_work; + struct otx2_mac_table *mac_table; /* Ethtool stuff */ u32 msg_enable; @@ -273,6 +314,8 @@ struct otx2_nic { struct otx2_ptp *ptp; struct hwtstamp_config tstamp; + + struct otx2_flow_config *flow_cfg; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -421,21 +464,9 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr) return result; } -static inline u64 otx2_lmt_flush(uint64_t addr) -{ - u64 result = 0; - - __asm__ volatile(".cpu generic+lse\n" - "ldeor xzr,%x[rf],[%[rs]]" - : [rf]"=r"(result) - : [rs]"r"(addr)); - return result; -} - #else #define otx2_write128(lo, hi, addr) #define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; }) -#define otx2_lmt_flush(addr) ({ 0; }) #endif /* Alloc pointer from pool/aura */ @@ -642,4 +673,24 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); +/* MCAM filter related APIs */ +int otx2_mcam_flow_init(struct otx2_nic *pf); +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf); +void otx2_mcam_flow_del(struct otx2_nic *pf); +int otx2_destroy_ntuple_flows(struct otx2_nic *pf); +int otx2_destroy_mcam_flows(struct otx2_nic *pfvf); +int otx2_get_flow(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc, u32 location); +int otx2_get_all_flows(struct otx2_nic *pfvf, + struct ethtool_rxnfc *nfc, u32 *rule_locs); +int otx2_add_flow(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp); +int otx2_remove_flow(struct otx2_nic *pfvf, u32 location); +int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req); +int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); +int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); +int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); +int 
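
The offsets kept in struct otx2_flow_config above carve the flat entry[] array of allocated MCAM indices into fixed regions: the per-VF VLAN pairs come first, then the 32 ntuple filters, the 8 unicast MAC filters, and a single RX VLAN entry, matching the layout that otx2_alloc_mcam_entries() establishes in otx2_flows.c further down. A small sketch of that arithmetic, assuming a hypothetical 4-VF configuration:

#include <stdio.h>

#define OTX2_MAX_NTUPLE_FLOWS	32
#define OTX2_MAX_UNICAST_FLOWS	8
#define OTX2_PER_VF_VLAN_FLOWS	2	/* rx+tx per VF */

int main(void)
{
	int total_vfs = 4;	/* assumed VF count for this example */
	int vf_vlan_offset = 0;
	int ntuple_offset = vf_vlan_offset + total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	int unicast_offset = ntuple_offset + OTX2_MAX_NTUPLE_FLOWS;
	int rx_vlan_offset = unicast_offset + OTX2_MAX_UNICAST_FLOWS;

	printf("vf-vlan %d..%d, ntuple %d..%d, unicast %d..%d, rx-vlan %d\n",
	       vf_vlan_offset, ntuple_offset - 1,
	       ntuple_offset, unicast_offset - 1,
	       unicast_offset, rx_vlan_offset - 1, rx_vlan_offset);
	return 0;
}
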
otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); + +#endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 662fb80dbb9d..67171b66a56c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -551,6 +551,16 @@ static int otx2_get_rxnfc(struct net_device *dev, nfc->data = pfvf->hw.rx_queues; ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + nfc->rule_cnt = pfvf->flow_cfg->nr_flows; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + ret = otx2_get_all_flows(pfvf, nfc, rules); + break; case ETHTOOL_GRXFH: return otx2_get_rss_hash_opts(pfvf, nfc); default: @@ -561,6 +571,50 @@ static int otx2_get_rxnfc(struct net_device *dev, static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) { + bool ntuple = !!(dev->features & NETIF_F_NTUPLE); + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_SRXFH: + ret = otx2_set_rss_hash_opts(pfvf, nfc); + break; + case ETHTOOL_SRXCLSRLINS: + if (netif_running(dev) && ntuple) + ret = otx2_add_flow(pfvf, &nfc->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + if (netif_running(dev) && ntuple) + ret = otx2_remove_flow(pfvf, nfc->fs.location); + break; + default: + break; + } + + return ret; +} + +static int otx2vf_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rules) +{ + struct otx2_nic *pfvf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (nfc->cmd) { + case ETHTOOL_GRXRINGS: + nfc->data = pfvf->hw.rx_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + return otx2_get_rss_hash_opts(pfvf, nfc); + default: + break; + } + return ret; +} + +static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) +{ struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -806,8 +860,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_sset_count = otx2vf_get_sset_count, .set_channels = otx2_set_channels, .get_channels = otx2_get_channels, - .get_rxnfc = otx2_get_rxnfc, - .set_rxnfc = otx2_set_rxnfc, + .get_rxnfc = otx2vf_get_rxnfc, + .set_rxnfc = otx2vf_set_rxnfc, .get_rxfh_key_size = otx2_get_rxfh_key_size, .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c new file mode 100644 index 000000000000..be8ccfce1848 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physical Function ethernet driver + * + * Copyright (C) 2020 Marvell. 
+ */ + +#include <net/ipv6.h> + +#include "otx2_common.h" + +#define OTX2_DEFAULT_ACTION 0x1 + +struct otx2_flow { + struct ethtool_rx_flow_spec flow_spec; + struct list_head list; + u32 location; + u16 entry; + bool is_vf; + int vf; +}; + +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + int vf_vlan_max_flows; + int i; + + mutex_lock(&pfvf->mbox.lock); + + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; + req->contig = false; + req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows; + + /* Send message to AF */ + if (otx2_sync_mbox_msg(&pfvf->mbox)) { + mutex_unlock(&pfvf->mbox.lock); + return -EINVAL; + } + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&pfvf->mbox.mbox, 0, &req->hdr); + + if (rsp->count != req->count) { + netdev_info(pfvf->netdev, + "Unable to allocate %d MCAM entries, got %d\n", + req->count, rsp->count); + /* support only ntuples here */ + flow_cfg->ntuple_max_flows = rsp->count; + flow_cfg->ntuple_offset = 0; + pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + } else { + flow_cfg->vf_vlan_offset = 0; + flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset + + vf_vlan_max_flows; + flow_cfg->unicast_offset = flow_cfg->ntuple_offset + + OTX2_MAX_NTUPLE_FLOWS; + flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + + OTX2_MAX_UNICAST_FLOWS; + pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; + pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; + pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT; + } + + for (i = 0; i < rsp->count; i++) + flow_cfg->entry[i] = rsp->entry_list[i]; + + pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + + mutex_unlock(&pfvf->mbox.lock); + + return 0; +} + +int otx2_mcam_flow_init(struct otx2_nic *pf) +{ + int err; + + pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config), + GFP_KERNEL); + if (!pf->flow_cfg) + return -ENOMEM; + + INIT_LIST_HEAD(&pf->flow_cfg->flow_list); + + pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS; + + err = otx2_alloc_mcam_entries(pf); + if (err) + return err; + + pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) + * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); + if (!pf->mac_table) + return -ENOMEM; + + return 0; +} + +void otx2_mcam_flow_del(struct otx2_nic *pf) +{ + otx2_destroy_mcam_flows(pf); +} + +/* On success adds mcam entry + * On failure enable promiscuous mode + */ +static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) +{ + struct otx2_flow_config *flow_cfg = pf->flow_cfg; + struct npc_install_flow_req *req; + int err, i; + + if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) + return -ENOMEM; + + /* don't have free mcam entries or uc list is greater than allotted */ + if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) + return -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + /* unicast offset starts at 32; entries 0..31 are for ntuple */ + for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + if (pf->mac_table[i].inuse) + continue; + ether_addr_copy(pf->mac_table[i].addr, mac); + pf->mac_table[i].inuse = true; + pf->mac_table[i].mcam_entry = + flow_cfg->entry[i + flow_cfg->unicast_offset]; + req->entry = pf->mac_table[i].mcam_entry; + break; + } + + 
ether_addr_copy(req->packet.dmac, mac); + eth_broadcast_addr((u8 *)&req->mask.dmac); + req->features = BIT_ULL(NPC_DMAC); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->op = NIX_RX_ACTION_DEFAULT; + req->set_cntr = 1; + + err = otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); + + return err; +} + +int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + return otx2_do_add_macfilter(pf, mac); +} + +static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, + int *mcam_entry) +{ + int i; + + for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + if (!pf->mac_table[i].inuse) + continue; + + if (ether_addr_equal(pf->mac_table[i].addr, mac)) { + *mcam_entry = pf->mac_table[i].mcam_entry; + pf->mac_table[i].inuse = false; + return true; + } + } + return false; +} + +int otx2_del_macfilter(struct net_device *netdev, const u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct npc_delete_flow_req *req; + int err, mcam_entry; + + /* check if an mcam entry exists for the given mac */ + if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry)) + return 0; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + req->entry = mcam_entry; + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); + + return err; +} + +static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location) +{ + struct otx2_flow *iter; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->location == location) + return iter; + } + + return NULL; +} + +static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) +{ + struct list_head *head = &pfvf->flow_cfg->flow_list; + struct otx2_flow *iter; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->location > flow->location) + break; + head = &iter->list; + } + + list_add(&flow->list, head); +} + +int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, + u32 location) +{ + struct otx2_flow *iter; + + if (location >= pfvf->flow_cfg->ntuple_max_flows) + return -EINVAL; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->location == location) { + nfc->fs = iter->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, + u32 *rule_locs) +{ + u32 location = 0; + int idx = 0; + int err = 0; + + nfc->data = pfvf->flow_cfg->ntuple_max_flows; + while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) { + err = otx2_get_flow(pfvf, nfc, location); + if (!err) + rule_locs[idx++] = location; + location++; + } + + return err; +} + +static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req, + u32 flow_type) +{ + struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec; + struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec; + struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec; + struct flow_msg *pmask = &req->mask; + struct flow_msg *pkt = &req->packet; + + switch (flow_type) { + case IP_USER_FLOW: + if (ipv4_usr_mask->ip4src) { + memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src, + sizeof(pkt->ip4src)); + memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src, + sizeof(pmask->ip4src)); + req->features |= BIT_ULL(NPC_SIP_IPV4); 
+ } + if (ipv4_usr_mask->ip4dst) { + memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst, + sizeof(pkt->ip4dst)); + memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst, + sizeof(pmask->ip4dst)); + req->features |= BIT_ULL(NPC_DIP_IPV4); + } + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + if (ipv4_l4_mask->ip4src) { + memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src, + sizeof(pkt->ip4src)); + memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src, + sizeof(pmask->ip4src)); + req->features |= BIT_ULL(NPC_SIP_IPV4); + } + if (ipv4_l4_mask->ip4dst) { + memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst, + sizeof(pkt->ip4dst)); + memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst, + sizeof(pmask->ip4dst)); + req->features |= BIT_ULL(NPC_DIP_IPV4); + } + if (ipv4_l4_mask->psrc) { + memcpy(&pkt->sport, &ipv4_l4_hdr->psrc, + sizeof(pkt->sport)); + memcpy(&pmask->sport, &ipv4_l4_mask->psrc, + sizeof(pmask->sport)); + if (flow_type == UDP_V4_FLOW) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (flow_type == TCP_V4_FLOW) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else + req->features |= BIT_ULL(NPC_SPORT_SCTP); + } + if (ipv4_l4_mask->pdst) { + memcpy(&pkt->dport, &ipv4_l4_hdr->pdst, + sizeof(pkt->dport)); + memcpy(&pmask->dport, &ipv4_l4_mask->pdst, + sizeof(pmask->dport)); + if (flow_type == UDP_V4_FLOW) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (flow_type == TCP_V4_FLOW) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else + req->features |= BIT_ULL(NPC_DPORT_SCTP); + } + break; + default: + break; + } +} + +static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req, + u32 flow_type) +{ + struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec; + struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec; + struct flow_msg *pmask = &req->mask; + struct flow_msg *pkt = &req->packet; + + switch (flow_type) { + case IPV6_USER_FLOW: + if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) { + memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src, + sizeof(pkt->ip6src)); + memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src, + sizeof(pmask->ip6src)); + req->features |= BIT_ULL(NPC_SIP_IPV6); + } + if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) { + memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst, + sizeof(pkt->ip6dst)); + memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst, + sizeof(pmask->ip6dst)); + req->features |= BIT_ULL(NPC_DIP_IPV6); + } + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) { + memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src, + sizeof(pkt->ip6src)); + memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src, + sizeof(pmask->ip6src)); + req->features |= BIT_ULL(NPC_SIP_IPV6); + } + if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) { + memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst, + sizeof(pkt->ip6dst)); + memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst, + sizeof(pmask->ip6dst)); + req->features |= BIT_ULL(NPC_DIP_IPV6); + } + if (ipv6_l4_mask->psrc) { + memcpy(&pkt->sport, &ipv6_l4_hdr->psrc, + sizeof(pkt->sport)); + memcpy(&pmask->sport, &ipv6_l4_mask->psrc, + sizeof(pmask->sport)); + if (flow_type == UDP_V6_FLOW) + req->features |= BIT_ULL(NPC_SPORT_UDP); + else if (flow_type == TCP_V6_FLOW) + req->features |= BIT_ULL(NPC_SPORT_TCP); + else + req->features |= BIT_ULL(NPC_SPORT_SCTP); + } + if (ipv6_l4_mask->pdst) { + 
memcpy(&pkt->dport, &ipv6_l4_hdr->pdst, + sizeof(pkt->dport)); + memcpy(&pmask->dport, &ipv6_l4_mask->pdst, + sizeof(pmask->dport)); + if (flow_type == UDP_V6_FLOW) + req->features |= BIT_ULL(NPC_DPORT_UDP); + else if (flow_type == TCP_V6_FLOW) + req->features |= BIT_ULL(NPC_DPORT_TCP); + else + req->features |= BIT_ULL(NPC_DPORT_SCTP); + } + break; + default: + break; + } +} + +int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, + struct npc_install_flow_req *req) +{ + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + struct flow_msg *pmask = &req->mask; + struct flow_msg *pkt = &req->packet; + u32 flow_type; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + switch (flow_type) { + /* bits not set in mask are don't care */ + case ETHER_FLOW: + if (!is_zero_ether_addr(eth_mask->h_source)) { + ether_addr_copy(pkt->smac, eth_hdr->h_source); + ether_addr_copy(pmask->smac, eth_mask->h_source); + req->features |= BIT_ULL(NPC_SMAC); + } + if (!is_zero_ether_addr(eth_mask->h_dest)) { + ether_addr_copy(pkt->dmac, eth_hdr->h_dest); + ether_addr_copy(pmask->dmac, eth_mask->h_dest); + req->features |= BIT_ULL(NPC_DMAC); + } + if (eth_mask->h_proto) { + memcpy(&pkt->etype, ð_hdr->h_proto, + sizeof(pkt->etype)); + memcpy(&pmask->etype, ð_mask->h_proto, + sizeof(pmask->etype)); + req->features |= BIT_ULL(NPC_ETYPE); + } + break; + case IP_USER_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + otx2_prepare_ipv4_flow(fsp, req, flow_type); + break; + case IPV6_USER_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + otx2_prepare_ipv6_flow(fsp, req, flow_type); + break; + default: + return -EOPNOTSUPP; + } + if (fsp->flow_type & FLOW_EXT) { + if (fsp->m_ext.vlan_etype) + return -EINVAL; + if (fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)) + return -EINVAL; + if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + + memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, + sizeof(pkt->vlan_tci)); + memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci, + sizeof(pmask->vlan_tci)); + req->features |= BIT_ULL(NPC_OUTER_VID); + } + + /* Not Drop/Direct to queue but use action in default entry */ + if (fsp->m_ext.data[1] && + fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION)) + req->op = NIX_RX_ACTION_DEFAULT; + } + + if (fsp->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fsp->m_ext.h_dest)) { + ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest); + ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest); + req->features |= BIT_ULL(NPC_DMAC); + } + + if (!req->features) + return -EOPNOTSUPP; + + return 0; +} + +static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) +{ + u64 ring_cookie = flow->flow_spec.ring_cookie; + struct npc_install_flow_req *req; + int err, vf = 0; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_prepare_flow_request(&flow->flow_spec, req); + if (err) { + /* free the allocated msg above */ + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + req->entry = flow->entry; + req->intf = NIX_INTF_RX; + req->set_cntr = 1; + req->channel = pfvf->hw.rx_chan_base; + if (ring_cookie == RX_CLS_FLOW_DISC) { + req->op = NIX_RX_ACTIONOP_DROP; + } else { + /* change to unicast only if action of default entry is not + * requested by user + */ + if (req->op != NIX_RX_ACTION_DEFAULT) + 
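
otx2_prepare_flow_request() and the two helpers above follow one convention throughout: a field is copied into the MCAM key only when ethtool supplied a non-zero mask for it, and every copied field also sets the matching NPC feature bit so the AF knows which key fields the rule actually uses. A stripped-down sketch of that convention (the field and the bit position are illustrative, not the driver's real npc_features layout):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))
#define NPC_SIP_IPV4	3	/* illustrative bit position only */

struct flow_msg { uint32_t ip4src; };

/* Copy a field only when its mask is non-zero; bits not set in the
 * mask are don't-care. Record the matched key field in *features.
 */
static void set_ip4src(struct flow_msg *pkt, struct flow_msg *pmask,
		       uint32_t val, uint32_t mask, uint64_t *features)
{
	if (!mask)
		return;
	pkt->ip4src = val;
	pmask->ip4src = mask;
	*features |= BIT_ULL(NPC_SIP_IPV4);
}

int main(void)
{
	struct flow_msg pkt = { 0 }, pmask = { 0 };
	uint64_t features = 0;

	set_ip4src(&pkt, &pmask, 0xc0a80001, 0xffffffff, &features);
	printf("features=%#llx ip4src=%#x mask=%#x\n",
	       (unsigned long long)features, pkt.ip4src, pmask.ip4src);
	return 0;
}
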
req->op = NIX_RX_ACTIONOP_UCAST; + req->index = ethtool_get_flow_spec_ring(ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + if (vf > pci_num_vf(pfvf->pdev)) { + mutex_unlock(&pfvf->mbox.lock); + return -EINVAL; + } + } + + /* ethtool ring_cookie has (VF + 1) for VF */ + if (vf) { + req->vf = vf; + flow->is_vf = true; + flow->vf = vf; + } + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + struct otx2_flow *flow; + bool new = false; + int err; + + if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) + return -ENOMEM; + + if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + if (fsp->location >= flow_cfg->ntuple_max_flows) + return -EINVAL; + + flow = otx2_find_flow(pfvf, fsp->location); + if (!flow) { + flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + if (!flow) + return -ENOMEM; + flow->location = fsp->location; + flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset + + flow->location]; + new = true; + } + /* struct copy */ + flow->flow_spec = *fsp; + + err = otx2_add_flow_msg(pfvf, flow); + if (err) { + if (new) + kfree(flow); + return err; + } + + /* add the new flow installed to list */ + if (new) { + otx2_add_flow_to_list(pfvf, flow); + flow_cfg->nr_flows++; + } + + return 0; +} + +static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) +{ + struct npc_delete_flow_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->entry = entry; + if (all) + req->all = 1; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct otx2_flow *flow; + int err; + + if (location >= flow_cfg->ntuple_max_flows) + return -EINVAL; + + flow = otx2_find_flow(pfvf, location); + if (!flow) + return -ENOENT; + + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (err) + return err; + + list_del(&flow->list); + kfree(flow); + flow_cfg->nr_flows--; + + return 0; +} + +int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_delete_flow_req *req; + struct otx2_flow *iter, *tmp; + int err; + + if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) + return 0; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->start = flow_cfg->entry[flow_cfg->ntuple_offset]; + req->end = flow_cfg->entry[flow_cfg->ntuple_offset + + flow_cfg->ntuple_max_flows - 1]; + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) { + list_del(&iter->list); + kfree(iter); + flow_cfg->nr_flows--; + } + return err; +} + +int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_mcam_free_entry_req *req; + struct otx2_flow *iter, *tmp; + int err; + + if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC)) + return 0; + + /* remove all flows */ + err = 
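
The ring_cookie handling in otx2_add_flow_msg() relies on ethtool's packed cookie layout: the destination queue sits in the low 32 bits, an optional (VF + 1) in bits 39:32 (zero meaning the PF itself), and the all-ones RX_CLS_FLOW_DISC value requests a drop. A user-space sketch of the decode performed by the uapi helpers ethtool_get_flow_spec_ring() and ethtool_get_flow_spec_ring_vf(); the mask constants are local stand-ins mirroring the ETHTOOL_RX_FLOW_SPEC_RING* definitions:

#include <stdio.h>
#include <stdint.h>

#define RX_FLOW_SPEC_RING	 0x00000000FFFFFFFFULL
#define RX_FLOW_SPEC_RING_VF	 0x000000FF00000000ULL
#define RX_FLOW_SPEC_RING_VF_OFF 32

int main(void)
{
	/* Queue 5 on VF 1: the VF field stores (vf + 1), zero means "PF" */
	uint64_t ring_cookie = (2ULL << RX_FLOW_SPEC_RING_VF_OFF) | 5;

	uint64_t ring = ring_cookie & RX_FLOW_SPEC_RING;
	uint64_t vf = (ring_cookie & RX_FLOW_SPEC_RING_VF) >>
		      RX_FLOW_SPEC_RING_VF_OFF;

	printf("vf field=%llu (VF %lld), queue=%llu\n",
	       (unsigned long long)vf, (long long)vf - 1,
	       (unsigned long long)ring);
	return 0;
}
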
otx2_remove_flow_msg(pfvf, 0, true); + if (err) + return err; + + list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) { + list_del(&iter->list); + kfree(iter); + flow_cfg->nr_flows--; + } + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->all = 1; + /* Send message to AF to free MCAM entries */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; + } + + pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; + mutex_unlock(&pfvf->mbox.lock); + + return 0; +} + +int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_install_flow_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset]; + req->intf = NIX_INTF_RX; + ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr); + eth_broadcast_addr((u8 *)&req->mask.dmac); + req->channel = pfvf->hw.rx_chan_base; + req->op = NIX_RX_ACTION_DEFAULT; + req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); + req->vtag0_valid = true; + req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; + + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; + struct npc_delete_flow_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset]; + /* Send message to AF */ + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + +int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) +{ + struct nix_vtag_config *req; + struct mbox_msghdr *rsp_hdr; + int err; + + /* Don't have enough mcam entries */ + if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)) + return -ENOMEM; + + if (enable) { + err = otx2_install_rxvlan_offload_flow(pf); + if (err) + return err; + } else { + err = otx2_delete_rxvlan_offload_flow(pf); + if (err) + return err; + } + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + /* config strip, capture and size */ + req->vtag_size = VTAGSIZE_T4; + req->cfg_type = 1; /* rx vlan cfg */ + req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req->rx.strip_vtag = enable; + req->rx.capture_vtag = enable; + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) { + mutex_unlock(&pf->mbox.lock); + return err; + } + + rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp_hdr)) { + mutex_unlock(&pf->mbox.lock); + return PTR_ERR(rsp_hdr); + } + + mutex_unlock(&pf->mbox.lock); + return rsp_hdr->rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 66f1a212f1f4..634d60655a74 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1278,6 +1278,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf) static int otx2_init_hw_resources(struct otx2_nic *pf) { + struct nix_lf_free_req 
*free_req; struct mbox *mbox = &pf->mbox; struct otx2_hw *hw = &pf->hw; struct msg_req *req; @@ -1359,8 +1360,9 @@ err_free_rq_ptrs: otx2_aura_pool_free(pf); err_free_nix_lf: mutex_lock(&mbox->lock); - req = otx2_mbox_alloc_msg_nix_lf_free(mbox); - if (req) { + free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (free_req) { + free_req->flags = NIX_LF_DISABLE_FLOWS; if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free nixlf\n", __func__); } @@ -1379,6 +1381,7 @@ exit: static void otx2_free_hw_resources(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; + struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_cq_queue *cq; struct msg_req *req; @@ -1419,8 +1422,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_lock(&mbox->lock); /* Reset NIX LF */ - req = otx2_mbox_alloc_msg_nix_lf_free(mbox); - if (req) { + free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox); + if (free_req) { + free_req->flags = NIX_LF_DISABLE_FLOWS; + if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN)) + free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG; if (otx2_sync_mbox_msg(mbox)) dev_err(pf->dev, "%s failed to free nixlf\n", __func__); } @@ -1562,6 +1568,9 @@ int otx2_open(struct net_device *netdev) otx2_set_cints_affinity(pf); + if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + otx2_enable_rxvlan(pf, true); + /* When reinitializing enable time stamping if it is enabled before */ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) { pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED; @@ -1716,10 +1725,20 @@ static void otx2_do_set_rx_mode(struct work_struct *work) struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); struct net_device *netdev = pf->netdev; struct nix_rx_mode *req; + bool promisc = false; if (!(netdev->flags & IFF_UP)) return; + if ((netdev->flags & IFF_PROMISC) || + (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + promisc = true; + } + + /* Write unicast address to mcam entries or del from mcam */ + if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) + __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); + mutex_lock(&pf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); if (!req) { @@ -1729,8 +1748,7 @@ static void otx2_do_set_rx_mode(struct work_struct *work) req->mode = NIX_RX_MODE_UCAST; - /* We don't support MAC address filtering yet */ - if (netdev->flags & IFF_PROMISC) + if (promisc) req->mode |= NIX_RX_MODE_PROMISC; else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; @@ -1743,11 +1761,20 @@ static int otx2_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = features ^ netdev->features; + bool ntuple = !!(features & NETIF_F_NTUPLE); struct otx2_nic *pf = netdev_priv(netdev); if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) return otx2_cgx_config_loopback(pf, features & NETIF_F_LOOPBACK); + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev)) + return otx2_enable_rxvlan(pf, + features & NETIF_F_HW_VLAN_CTAG_RX); + + if ((changed & NETIF_F_NTUPLE) && !ntuple) + otx2_destroy_ntuple_flows(pf); + return 0; } @@ -1903,6 +1930,245 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) } } +static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) +{ + struct npc_install_flow_req *req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + err = -ENOMEM; + goto out; + } + + ether_addr_copy(req->packet.dmac, mac); + 
eth_broadcast_addr((u8 *)&req->mask.dmac); + req->features = BIT_ULL(NPC_DMAC); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->default_rule = 1; + req->append = 1; + req->vf = vf + 1; + req->op = NIX_RX_ACTION_DEFAULT; + + err = otx2_sync_mbox_msg(&pf->mbox); +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + struct otx2_vf_config *config; + int ret; + + if (!netif_running(netdev)) + return -EAGAIN; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + config = &pf->vf_configs[vf]; + ether_addr_copy(config->mac, mac); + + ret = otx2_do_set_vf_mac(pf, vf, mac); + if (ret == 0) + dev_info(&pdev->dev, "Reload VF driver to apply the changes\n"); + + return ret; +} + +static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos, + __be16 proto) +{ + struct otx2_flow_config *flow_cfg = pf->flow_cfg; + struct nix_vtag_config_rsp *vtag_rsp; + struct npc_delete_flow_req *del_req; + struct nix_vtag_config *vtag_req; + struct npc_install_flow_req *req; + struct otx2_vf_config *config; + int err = 0; + u32 idx; + + config = &pf->vf_configs[vf]; + + if (!vlan && !config->vlan) + goto out; + + mutex_lock(&pf->mbox.lock); + + /* free old tx vtag entry */ + if (config->vlan) { + vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); + if (!vtag_req) { + err = -ENOMEM; + goto out; + } + vtag_req->cfg_type = 0; + vtag_req->tx.free_vtag0 = 1; + vtag_req->tx.vtag0_idx = config->tx_vtag_idx; + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + } + + if (!vlan && config->vlan) { + /* rx */ + del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); + if (!del_req) { + err = -ENOMEM; + goto out; + } + idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); + del_req->entry = + flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + /* tx */ + del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox); + if (!del_req) { + err = -ENOMEM; + goto out; + } + idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); + del_req->entry = + flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + err = otx2_sync_mbox_msg(&pf->mbox); + + goto out; + } + + /* rx */ + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + err = -ENOMEM; + goto out; + } + + idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX); + req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + req->packet.vlan_tci = htons(vlan); + req->mask.vlan_tci = htons(VLAN_VID_MASK); + /* af fills the destination mac addr */ + eth_broadcast_addr((u8 *)&req->mask.dmac); + req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC); + req->channel = pf->hw.rx_chan_base; + req->intf = NIX_INTF_RX; + req->vf = vf + 1; + req->op = NIX_RX_ACTION_DEFAULT; + req->vtag0_valid = true; + req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7; + req->set_cntr = 1; + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + /* tx */ + vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox); + if (!vtag_req) { + err = -ENOMEM; + goto out; + } + + /* configure tx vtag params */ + vtag_req->vtag_size = VTAGSIZE_T4; + vtag_req->cfg_type = 0; /* tx vlan cfg */ + vtag_req->tx.cfg_vtag0 = 1; + vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan; + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + vtag_rsp = 
(struct nix_vtag_config_rsp *)otx2_mbox_get_rsp + (&pf->mbox.mbox, 0, &vtag_req->hdr); + if (IS_ERR(vtag_rsp)) { + err = PTR_ERR(vtag_rsp); + goto out; + } + config->tx_vtag_idx = vtag_rsp->vtag0_idx; + + req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox); + if (!req) { + err = -ENOMEM; + goto out; + } + + eth_zero_addr((u8 *)&req->mask.dmac); + idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX); + req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx]; + req->features = BIT_ULL(NPC_DMAC); + req->channel = pf->hw.tx_chan_base; + req->intf = NIX_INTF_TX; + req->vf = vf + 1; + req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT; + req->vtag0_def = vtag_rsp->vtag0_idx; + req->vtag0_op = VTAG_INSERT; + req->set_cntr = 1; + + err = otx2_sync_mbox_msg(&pf->mbox); +out: + config->vlan = vlan; + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 proto) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + + if (!netif_running(netdev)) + return -EAGAIN; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + /* qos is currently unsupported */ + if (vlan >= VLAN_N_VID || qos) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT)) + return -EOPNOTSUPP; + + return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto); +} + +static int otx2_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct pci_dev *pdev = pf->pdev; + struct otx2_vf_config *config; + + if (!netif_running(netdev)) + return -EAGAIN; + + if (vf >= pci_num_vf(pdev)) + return -EINVAL; + + config = &pf->vf_configs[vf]; + ivi->vf = vf; + ether_addr_copy(ivi->mac, config->mac); + ivi->vlan = config->vlan; + + return 0; +} + static const struct net_device_ops otx2_netdev_ops = { .ndo_open = otx2_open, .ndo_stop = otx2_stop, @@ -1914,6 +2180,9 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, .ndo_do_ioctl = otx2_ioctl, + .ndo_set_vf_mac = otx2_set_vf_mac, + .ndo_set_vf_vlan = otx2_set_vf_vlan, + .ndo_get_vf_config = otx2_get_vf_config, }; static int otx2_wq_init(struct otx2_nic *pf) @@ -2110,6 +2379,25 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; + err = otx2_mcam_flow_init(pf); + if (err) + goto err_ptp_destroy; + + if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT) + netdev->hw_features |= NETIF_F_NTUPLE; + + if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT) + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* Support TSO on tag interface */ + netdev->vlan_features |= netdev->features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX; + netdev->features |= netdev->hw_features; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; @@ -2122,7 +2410,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_ptp_destroy; + goto err_del_mcam_entries; } err = otx2_wq_init(pf); @@ -2142,6 +2430,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); 
+err_del_mcam_entries: + otx2_mcam_flow_del(pf); err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: @@ -2285,6 +2575,8 @@ static void otx2_remove(struct pci_dev *pdev) pf = netdev_priv(netdev); + pf->flags |= OTX2_FLAG_PF_SHUTDOWN; + if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) otx2_config_hw_tx_tstamp(pf, false); if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) @@ -2300,6 +2592,7 @@ static void otx2_remove(struct pci_dev *pdev) destroy_workqueue(pf->otx2_wq); otx2_ptp_destroy(pf); + otx2_mcam_flow_del(pf); otx2_detach_resources(&pf->mbox); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 7bcf5246350f..56390a664517 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -12,7 +12,6 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, ptp_info); struct ptp_req *req; - int err; if (!ptp->nic) return -ENODEV; @@ -24,11 +23,7 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) req->op = PTP_OP_ADJFINE; req->scaled_ppm = scaled_ppm; - err = otx2_sync_mbox_msg(&ptp->nic->mbox); - if (err) - return err; - - return 0; + return otx2_sync_mbox_msg(&ptp->nic->mbox); } static u64 ptp_cc_read(const struct cyclecounter *cc) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index d5d7a2f37493..d0e25414f1a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -556,6 +556,19 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, ext->tstmp = 1; } +#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN) + if (skb_vlan_tag_present(skb)) { + if (skb->vlan_proto == htons(ETH_P_8021Q)) { + ext->vlan1_ins_ena = 1; + ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET; + ext->vlan1_ins_tci = skb_vlan_tag_get(skb); + } else if (skb->vlan_proto == htons(ETH_P_8021AD)) { + ext->vlan0_ins_ena = 1; + ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET; + ext->vlan0_ins_tci = skb_vlan_tag_get(skb); + } + } + *offset += sizeof(*ext); } @@ -871,6 +884,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, } if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { + /* Insert vlan tag before giving pkt to tso */ + if (skb_vlan_tag_present(skb)) + skb = __vlan_hwaccel_push_inside(skb); otx2_sq_append_tso(pfvf, sq, skb, qidx); return true; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 67fabf265fe6..d3e4cfd244e2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -558,6 +558,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4; netdev->features = netdev->hw_features; + /* Support TSO on tag interface */ + netdev->vlan_features |= netdev->features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + netdev->features |= netdev->hw_features; netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c 
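
In the otx2_txrx.c hunk above, the SQE extension header tells the hardware where to splice the tag in: OTX2_VLAN_PTR_OFFSET is ETH_HLEN - ETH_TLEN = 12, i.e. immediately after the destination and source MACs and just before the EtherType, with 802.1Q tags going into the vlan1 slot and 802.1AD tags into vlan0. A tiny sketch of that selection logic (the sqe_ext struct is an illustrative stand-in, not the real descriptor layout):

#include <stdio.h>
#include <stdint.h>

#define ETH_HLEN	14	/* dst mac (6) + src mac (6) + ethertype (2) */
#define ETH_TLEN	2	/* ethertype length */
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8
#define OTX2_VLAN_PTR_OFFSET	(ETH_HLEN - ETH_TLEN)

struct sqe_ext {	/* illustrative stand-in for the SQE ext header */
	int vlan0_ins_ena, vlan1_ins_ena;
	int vlan0_ins_ptr, vlan1_ins_ptr;
	uint16_t vlan0_ins_tci, vlan1_ins_tci;
};

static void add_vlan(struct sqe_ext *ext, uint16_t proto, uint16_t tci)
{
	if (proto == ETH_P_8021Q) {		/* customer/inner tag */
		ext->vlan1_ins_ena = 1;
		ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
		ext->vlan1_ins_tci = tci;
	} else if (proto == ETH_P_8021AD) {	/* service/outer tag */
		ext->vlan0_ins_ena = 1;
		ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
		ext->vlan0_ins_tci = tci;
	}
}

int main(void)
{
	struct sqe_ext ext = { 0 };

	add_vlan(&ext, ETH_P_8021Q, 100);
	printf("insert at byte %d, vlan1 tci %u\n",
	       ext.vlan1_ins_ptr, ext.vlan1_ins_tci);
	return 0;
}
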
b/drivers/net/ethernet/marvell/prestera/prestera_main.c index da4b286d1337..25dd903a3e92 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -93,15 +93,10 @@ static int prestera_port_open(struct net_device *dev) static int prestera_port_close(struct net_device *dev) { struct prestera_port *port = netdev_priv(dev); - int err; netif_stop_queue(dev); - err = prestera_hw_port_state_set(port, false); - if (err) - return err; - - return 0; + return prestera_hw_port_state_set(port, false); } static netdev_tx_t prestera_port_xmit(struct sk_buff *skb, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 25981a7a43b5..ebe1406c6e64 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4900,7 +4900,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz) }; if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2) - strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); + snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]); else snprintf(buf, sz, "(chip %#x)", chipid); return buf; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c index 0fe97155dd8f..72648535a14d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_path.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -241,22 +241,18 @@ out: int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) { - int err, path; + int path; path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII : MTK_ETH_PATH_GMAC2_SGMII; /* Setup proper MUXes along the path */ - err = mtk_eth_mux_setup(eth, path); - if (err) - return err; - - return 0; + return mtk_eth_mux_setup(eth, path); } int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) { - int err, path = 0; + int path = 0; if (mac_id == 1) path = MTK_ETH_PATH_GMAC2_GEPHY; @@ -265,25 +261,17 @@ int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) return -EINVAL; /* Setup proper MUXes along the path */ - err = mtk_eth_mux_setup(eth, path); - if (err) - return err; - - return 0; + return mtk_eth_mux_setup(eth, path); } int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) { - int err, path; + int path; path = (mac_id == 0) ? 
MTK_ETH_PATH_GMAC1_RGMII : MTK_ETH_PATH_GMAC2_RGMII; /* Setup proper MUXes along the path */ - err = mtk_eth_mux_setup(eth, path); - if (err) - return err; - - return 0; + return mtk_eth_mux_setup(eth, path); } diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 3b8576b9c2f9..f7053a74e6a8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -462,19 +462,14 @@ EXPORT_SYMBOL_GPL(mlx4_cq_free); int mlx4_init_cq_table(struct mlx4_dev *dev) { struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; - int err; spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; - err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, - dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); - if (err) - return err; - - return 0; + return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs, + dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0); } void mlx4_cleanup_cq_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6f290319b617..32aad4d32b88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2031,7 +2031,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); - memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); memset(&priv->port_stats, 0, sizeof(priv->port_stats)); memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats)); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 502d1b97855c..7954c1daf2b6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -283,7 +283,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; - if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0) + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0) goto err_ring; tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * @@ -684,7 +684,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp_prog = rcu_dereference(ring->xdp_prog); xdp.rxq = &ring->xdp_rxq; xdp.frame_sz = priv->frag_info[0].frag_stride; - doorbell_pending = 0; + doorbell_pending = false; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of @@ -914,7 +914,6 @@ next: wmb(); /* ensure HW sees CQ consumer before we post new buffers */ ring->cons = cq->mcq.cons_index; } - AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); mlx4_en_refill_rx_buffers(priv, ring); @@ -966,8 +965,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget) /* in case we got here because of !clean_complete */ done = budget; - INC_PERF_COUNTER(priv->pstats.napi_quota); - cpu_curr = smp_processor_id(); idata = irq_desc_get_irq_data(cq->irq_desc); aff = irq_data_get_affinity_mask(idata); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 59b097cda327..31b74bddb7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -890,9 +890,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, 
struct net_device *dev) if (unlikely(!priv->port_up)) goto tx_drop; - /* fetch ring->cons far ahead before needing it to avoid stall */ - ring_cons = READ_ONCE(ring->cons); - real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); if (unlikely(!real_size)) @@ -924,10 +921,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); - /* Track current inflight packets for performance analysis */ - AVG_PERF_COUNTER(priv->pstats.inflight_avg, - (u32)(ring->prod - ring_cons - 1)); - /* Packet is good - grab an index and transmit it */ index = ring->prod & ring->size_mask; bf_index = ring->prod; @@ -1038,7 +1031,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->packets++; } ring->bytes += tx_info->nr_bytes; - AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); if (tx_info->inl) build_inline_wqe(tx_desc, skb, shinfo, fragptr); @@ -1167,10 +1159,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, index = ring->prod & ring->size_mask; tx_info = &ring->tx_info[index]; - /* Track current inflight packets for performance analysis */ - AVG_PERF_COUNTER(priv->pstats.inflight_avg, - (u32)(ring->prod - READ_ONCE(ring->cons) - 1)); - tx_desc = ring->buf + (index << LOG_TXBB_SIZE); data = &tx_desc->data; @@ -1195,7 +1183,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); rx_ring->xdp_tx++; - AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length); ring->prod += MLX4_EN_XDP_TX_NRTXBB; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h index 582997577a04..954b86faac29 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -135,7 +135,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, * @dev: mlx4_dev. * @port: Physical port number. * @vport: Vport id. - * @out_param: Array of mlx4_vport_qos_param which holds the requested values. + * @in_param: Array of mlx4_vport_qos_param which holds the requested values. * * Returns 0 on success or a negative mlx4_core errno code. 
**/ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 30378e4c90b5..17f2b1919378 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -36,6 +36,7 @@ #include <linux/bitops.h> #include <linux/compiler.h> +#include <linux/ethtool.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/netdevice.h> @@ -170,27 +171,6 @@ #define MLX4_EN_LOOPBACK_RETRIES 5 #define MLX4_EN_LOOPBACK_TIMEOUT 100 -#ifdef MLX4_EN_PERF_STAT -/* Number of samples to 'average' */ -#define AVG_SIZE 128 -#define AVG_FACTOR 1024 - -#define INC_PERF_COUNTER(cnt) (++(cnt)) -#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add)) -#define AVG_PERF_COUNTER(cnt, sample) \ - ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE) -#define GET_PERF_COUNTER(cnt) (cnt) -#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR) - -#else - -#define INC_PERF_COUNTER(cnt) do {} while (0) -#define ADD_PERF_COUNTER(cnt, add) do {} while (0) -#define AVG_PERF_COUNTER(cnt, sample) do {} while (0) -#define GET_PERF_COUNTER(cnt) (0) -#define GET_AVG_PERF_COUNTER(cnt) (0) -#endif /* MLX4_EN_PERF_STAT */ - /* Constants for TX flow */ enum { MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ @@ -608,7 +588,6 @@ struct mlx4_en_priv { struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; - struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_counter_stats pf_stats; struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES]; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 51d4eaab6a2f..7b51ae8cf759 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -2,12 +2,6 @@ #ifndef _MLX4_STATS_ #define _MLX4_STATS_ -#ifdef MLX4_EN_PERF_STAT -#define NUM_PERF_STATS NUM_PERF_COUNTERS -#else -#define NUM_PERF_STATS 0 -#endif - #define NUM_PRIORITIES 9 #define NUM_PRIORITY_STATS 2 @@ -46,16 +40,6 @@ struct mlx4_en_port_stats { #define NUM_PORT_STATS 10 }; -struct mlx4_en_perf_stats { - u32 tx_poll; - u64 tx_pktsz_avg; - u32 inflight_avg; - u16 tx_coal_avg; - u16 rx_coal_avg; - u32 napi_quota; -#define NUM_PERF_COUNTERS 6 -}; - struct mlx4_en_xdp_stats { unsigned long rx_xdp_drop; unsigned long rx_xdp_tx; @@ -135,7 +119,7 @@ enum { }; #define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \ - NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \ + NUM_FLOW_STATS + NUM_PF_STATS + \ NUM_XDP_STATS + NUM_PHY_STATS) #define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \ diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1187ef1375e2..394f43add85c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -300,7 +300,7 @@ static const char *resource_str(enum mlx4_resource rt) case RES_FS_RULE: return "RES_FS_RULE"; case RES_XRCD: return "RES_XRCD"; default: return "Unknown resource type !!!"; - }; + } } static void rem_slave_vlans(struct mlx4_dev *dev, int slave); diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index cbe4d9746ddf..dd890f5d7b72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -272,19 +272,14 @@ 
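
For reference, the AVG_PERF_COUNTER() macro deleted in this hunk kept a fixed-point exponential moving average: cnt = (cnt * (AVG_SIZE - 1) + sample * AVG_FACTOR) / AVG_SIZE, where AVG_FACTOR scales the stored value so integer division retains fractional precision and GET_AVG_PERF_COUNTER() divides it back out. A stand-alone sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

#define AVG_SIZE	128	/* number of samples to 'average' over */
#define AVG_FACTOR	1024	/* fixed-point scale */

int main(void)
{
	uint64_t avg = 0;
	int i;

	/* Feed a steady stream of 1500-byte samples; the stored value
	 * converges toward sample * AVG_FACTOR.
	 */
	for (i = 0; i < 512; i++)
		avg = (avg * (AVG_SIZE - 1) + 1500ULL * AVG_FACTOR) / AVG_SIZE;

	/* GET_AVG_PERF_COUNTER() equivalent: scale back down (approaches 1500) */
	printf("readback ~ %llu bytes\n", (unsigned long long)(avg / AVG_FACTOR));
	return 0;
}
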
EXPORT_SYMBOL_GPL(mlx4_srq_query); int mlx4_init_srq_table(struct mlx4_dev *dev) { struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; - int err; spin_lock_init(&srq_table->lock); INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC); if (mlx4_is_slave(dev)) return 0; - err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, - dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); - if (err) - return err; - - return 0; + return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs, + dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0); } void mlx4_cleanup_srq_table(struct mlx4_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 3e371d24c462..6e4d7bb7fea2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -6,6 +6,7 @@ config MLX5_CORE tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" depends on PCI + select AUXILIARY_BUS select NET_DEVLINK depends on VXLAN || !VXLAN depends on MLXFW || !MLXFW diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 2d477f9a8cb7..77961643d5a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -25,7 +25,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ - en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o + en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o # # Netdev extra @@ -81,7 +81,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ steering/dr_matcher.o steering/dr_rule.o \ - steering/dr_icm_pool.o \ + steering/dr_icm_pool.o steering/dr_buddy.o \ steering/dr_ste.o steering/dr_send.o \ steering/dr_cmd.o steering/dr_fw.o \ steering/dr_action.o steering/fs_dr.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e49387dbef98..50c7b9ee80c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -2142,7 +2142,6 @@ dma_pool_err: kvfree(cmd->stats); return err; } -EXPORT_SYMBOL(mlx5_cmd_init); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) { @@ -2155,11 +2154,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) dma_pool_destroy(cmd->pool); kvfree(cmd->stats); } -EXPORT_SYMBOL(mlx5_cmd_cleanup); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state) { dev->cmd.state = cmdif_state; } -EXPORT_SYMBOL(mlx5_cmd_set_state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 1972ddd12704..b051417ede67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -31,313 +31,484 @@ */ #include <linux/mlx5/driver.h> +#include <linux/mlx5/eswitch.h> +#include <linux/mlx5/mlx5_ifc_vdpa.h> #include "mlx5_core.h" -static LIST_HEAD(intf_list); -static LIST_HEAD(mlx5_dev_list); /* intf dev list mutex */ static DEFINE_MUTEX(mlx5_intf_mutex); +static DEFINE_IDA(mlx5_adev_ida); -struct mlx5_device_context { - struct list_head list; - struct mlx5_interface *intf; - void 
*context; - unsigned long state; -}; +static bool is_eth_rep_supported(struct mlx5_core_dev *dev) +{ + if (!IS_ENABLED(CONFIG_MLX5_ESWITCH)) + return false; -enum { - MLX5_INTERFACE_ADDED, - MLX5_INTERFACE_ATTACHED, -}; + if (!MLX5_ESWITCH_MANAGER(dev)) + return false; + if (!is_mdev_switchdev_mode(dev)) + return false; -void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) + return true; +} + +static bool is_eth_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) + return false; - if (!mlx5_lag_intf_add(intf, priv)) - return; + if (is_eth_rep_supported(dev)) + return false; - dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); - if (!dev_ctx) - return; + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return false; - dev_ctx->intf = intf; + if (!MLX5_CAP_GEN(dev, eth_net_offloads)) { + mlx5_core_warn(dev, "Missing eth_net_offloads capability\n"); + return false; + } - dev_ctx->context = intf->add(dev); - if (dev_ctx->context) { - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - if (intf->attach) - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); + if (!MLX5_CAP_GEN(dev, nic_flow_table)) { + mlx5_core_warn(dev, "Missing nic_flow_table capability\n"); + return false; + } - spin_lock_irq(&priv->ctx_lock); - list_add_tail(&dev_ctx->list, &priv->ctx_list); - spin_unlock_irq(&priv->ctx_lock); + if (!MLX5_CAP_ETH(dev, csum_cap)) { + mlx5_core_warn(dev, "Missing csum_cap capability\n"); + return false; } - if (!dev_ctx->context) - kfree(dev_ctx); + if (!MLX5_CAP_ETH(dev, max_lso_cap)) { + mlx5_core_warn(dev, "Missing max_lso_cap capability\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, vlan_cap)) { + mlx5_core_warn(dev, "Missing vlan_cap capability\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) { + mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n"); + return false; + } + + if (MLX5_CAP_FLOWTABLE(dev, + flow_table_properties_nic_receive.max_ft_level) < 3) { + mlx5_core_warn(dev, "max_ft_level < 3\n"); + return false; + } + + if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable)) + mlx5_core_warn(dev, "Self loop back prevention is not supported\n"); + if (!MLX5_CAP_GEN(dev, cq_moderation)) + mlx5_core_warn(dev, "CQ moderation is not supported\n"); + + return true; } -static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, - struct mlx5_priv *priv) +static bool is_vnet_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; + if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET)) + return false; - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf == intf) - return dev_ctx; - return NULL; + if (mlx5_core_is_pf(dev)) + return false; + + if (!(MLX5_CAP_GEN_64(dev, general_obj_types) & + MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q)) + return false; + + if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) & + MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE)) + return false; + + if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type)) + return false; + + return true; } -void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) +static bool is_ib_rep_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; + if (dev->priv.flags 
& MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; - spin_lock_irq(&priv->ctx_lock); - list_del(&dev_ctx->list); - spin_unlock_irq(&priv->ctx_lock); + if (!is_eth_rep_supported(dev)) + return false; - if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - intf->remove(dev, dev_ctx->context); + if (!MLX5_ESWITCH_MANAGER(dev)) + return false; - kfree(dev_ctx); + if (!is_mdev_switchdev_mode(dev)) + return false; + + if (mlx5_core_mp_enabled(dev)) + return false; + + return true; } -static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv) +static bool is_mp_supported(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); - - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; - - if (intf->attach) { - if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) - return; - if (intf->attach(dev, dev_ctx->context)) - return; - set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - } else { - if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - return; - dev_ctx->context = intf->add(dev); - if (!dev_ctx->context) - return; - set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - } + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; + + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; + + if (is_ib_rep_supported(dev)) + return false; + + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return false; + + if (!mlx5_core_is_mp_slave(dev)) + return false; + + return true; } -void mlx5_attach_device(struct mlx5_core_dev *dev) +static bool is_ib_supported(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) + return false; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(intf, &intf_list, list) - mlx5_attach_interface(intf, priv); - mutex_unlock(&mlx5_intf_mutex); + if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV) + return false; + + if (is_ib_rep_supported(dev)) + return false; + + if (is_mp_supported(dev)) + return false; + + return true; +} + +enum { + MLX5_INTERFACE_PROTOCOL_ETH_REP, + MLX5_INTERFACE_PROTOCOL_ETH, + + MLX5_INTERFACE_PROTOCOL_IB_REP, + MLX5_INTERFACE_PROTOCOL_MPIB, + MLX5_INTERFACE_PROTOCOL_IB, + + MLX5_INTERFACE_PROTOCOL_VNET, +}; + +static const struct mlx5_adev_device { + const char *suffix; + bool (*is_supported)(struct mlx5_core_dev *dev); +} mlx5_adev_devices[] = { + [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet", + .is_supported = &is_vnet_supported }, + [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma", + .is_supported = &is_ib_supported }, + [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth", + .is_supported = &is_eth_supported }, + [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep", + .is_supported = &is_eth_rep_supported }, + [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep", + .is_supported = &is_ib_rep_supported }, + [MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport", + .is_supported = &is_mp_supported }, +}; + +int mlx5_adev_idx_alloc(void) +{ + return ida_alloc(&mlx5_adev_ida, GFP_KERNEL); } -static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv) +void mlx5_adev_idx_free(int idx) { - struct mlx5_device_context *dev_ctx; - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); - - dev_ctx = mlx5_get_device(intf, priv); - if (!dev_ctx) - return; - - if (intf->detach) { - if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) - 
return; - intf->detach(dev, dev_ctx->context); - clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); - } else { - if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - return; - intf->remove(dev, dev_ctx->context); - clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); - } + ida_free(&mlx5_adev_ida, idx); } -void mlx5_detach_device(struct mlx5_core_dev *dev) +int mlx5_adev_init(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(intf, &intf_list, list) - mlx5_detach_interface(intf, priv); - mutex_unlock(&mlx5_intf_mutex); + priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices), + sizeof(struct mlx5_adev *), GFP_KERNEL); + if (!priv->adev) + return -ENOMEM; + + return 0; } -bool mlx5_device_registered(struct mlx5_core_dev *dev) +void mlx5_adev_cleanup(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - bool found = false; + struct mlx5_priv *priv = &dev->priv; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - if (priv == &dev->priv) - found = true; - mutex_unlock(&mlx5_intf_mutex); + kfree(priv->adev); +} + +static void adev_release(struct device *dev) +{ + struct mlx5_adev *mlx5_adev = + container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_priv *priv = &mlx5_adev->mdev->priv; + int idx = mlx5_adev->idx; - return found; + kfree(mlx5_adev); + priv->adev[idx] = NULL; } -void mlx5_register_device(struct mlx5_core_dev *dev) +static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx) +{ + const char *suffix = mlx5_adev_devices[idx].suffix; + struct auxiliary_device *adev; + struct mlx5_adev *madev; + int ret; + + madev = kzalloc(sizeof(*madev), GFP_KERNEL); + if (!madev) + return ERR_PTR(-ENOMEM); + + adev = &madev->adev; + adev->id = dev->priv.adev_idx; + adev->name = suffix; + adev->dev.parent = dev->device; + adev->dev.release = adev_release; + madev->mdev = dev; + madev->idx = idx; + + ret = auxiliary_device_init(adev); + if (ret) { + kfree(madev); + return ERR_PTR(ret); + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ERR_PTR(ret); + } + return madev; +} + +static void del_adev(struct auxiliary_device *adev) +{ + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +int mlx5_attach_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + struct auxiliary_device *adev; + struct auxiliary_driver *adrv; + int ret = 0, i; mutex_lock(&mlx5_intf_mutex); - list_add_tail(&priv->dev_list, &mlx5_dev_list); - list_for_each_entry(intf, &intf_list, list) - mlx5_add_device(intf, priv); + for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { + if (!priv->adev[i]) { + bool is_supported = false; + + if (mlx5_adev_devices[i].is_supported) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; + } + } else { + adev = &priv->adev[i]->adev; + adrv = to_auxiliary_drv(adev->dev.driver); + + if (adrv->resume) + ret = adrv->resume(adev); + } + if (ret) { + mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n", + i, mlx5_adev_devices[i].suffix); + + break; + } + } mutex_unlock(&mlx5_intf_mutex); + return ret; } -void mlx5_unregister_device(struct mlx5_core_dev *dev) +void mlx5_detach_device(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; - struct mlx5_interface *intf; + 
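[Editor's note: the auxiliary devices created by add_adev() above are consumed on the driver side through the new auxiliary bus (see the AUXILIARY_BUS select in the Kconfig hunk earlier). A sketch of such a consumer follows; it assumes the auxiliary-bus match-name convention "<module>.<suffix>", i.e. "mlx5_core.eth" for the eth device, and the 5.11-era auxiliary_bus.h API. It is not the in-tree mlx5e driver, and the "demo" names are made up for illustration:

/* Hypothetical auxiliary driver binding one of the mlx5_core adevs. */
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/driver.h>
#include <linux/module.h>

static int demo_probe(struct auxiliary_device *adev,
		      const struct auxiliary_device_id *id)
{
	struct mlx5_adev *madev = container_of(adev, struct mlx5_adev, adev);

	/* madev->mdev is the mlx5_core_dev that created this adev */
	dev_info(&adev->dev, "bound, adev id %u, mdev %p\n", adev->id, madev->mdev);
	return 0;
}

static void demo_remove(struct auxiliary_device *adev)
{
	dev_info(&adev->dev, "unbound\n");
}

static const struct auxiliary_device_id demo_id_table[] = {
	{ .name = "mlx5_core.eth" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, demo_id_table);

static struct auxiliary_driver demo_driver = {
	.name = "demo",
	.probe = demo_probe,
	.remove = demo_remove,
	.id_table = demo_id_table,
};
module_auxiliary_driver(demo_driver);

MODULE_LICENSE("GPL");

End of editor's note.]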
struct auxiliary_device *adev; + struct auxiliary_driver *adrv; + pm_message_t pm = {}; + int i; mutex_lock(&mlx5_intf_mutex); - list_for_each_entry_reverse(intf, &intf_list, list) - mlx5_remove_device(intf, priv); - list_del(&priv->dev_list); + for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { + if (!priv->adev[i]) + continue; + + adev = &priv->adev[i]->adev; + adrv = to_auxiliary_drv(adev->dev.driver); + + if (adrv->suspend) { + adrv->suspend(adev, pm); + continue; + } + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; + } mutex_unlock(&mlx5_intf_mutex); } -int mlx5_register_interface(struct mlx5_interface *intf) +int mlx5_register_device(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - - if (!intf->add || !intf->remove) - return -EINVAL; + int ret; mutex_lock(&mlx5_intf_mutex); - list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - mlx5_add_device(intf, priv); + dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + ret = mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); + if (ret) + mlx5_unregister_device(dev); - return 0; + return ret; } -EXPORT_SYMBOL(mlx5_register_interface); -void mlx5_unregister_interface(struct mlx5_interface *intf) +void mlx5_unregister_device(struct mlx5_core_dev *dev) { - struct mlx5_priv *priv; - mutex_lock(&mlx5_intf_mutex); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) - mlx5_remove_device(intf, priv); - list_del(&intf->list); + dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); } -EXPORT_SYMBOL(mlx5_unregister_interface); -/* Must be called with intf_mutex held */ -static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol) +static int add_drivers(struct mlx5_core_dev *dev) { - struct mlx5_device_context *dev_ctx; - struct mlx5_interface *intf; - bool found = false; - - list_for_each_entry(intf, &intf_list, list) { - if (intf->protocol == protocol) { - dev_ctx = mlx5_get_device(intf, &mdev->priv); - if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - found = true; - break; + struct mlx5_priv *priv = &dev->priv; + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) { + bool is_supported = false; + + if (priv->adev[i]) + continue; + + if (mlx5_adev_devices[i].is_supported) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (!is_supported) + continue; + + priv->adev[i] = add_adev(dev, i); + if (IS_ERR(priv->adev[i])) { + mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n", + i, mlx5_adev_devices[i].suffix); + /* We continue to rescan drivers and leave it to the caller + * to decide whether to release everything or continue.
+ */ + ret = PTR_ERR(priv->adev[i]); + priv->adev[i] = NULL; } } - - return found; + return ret; } -void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) +static void delete_drivers(struct mlx5_core_dev *dev) { - mutex_lock(&mlx5_intf_mutex); - if (mlx5_has_added_dev_by_protocol(mdev, protocol)) { - mlx5_remove_dev_by_protocol(mdev, protocol); - mlx5_add_dev_by_protocol(mdev, protocol); + struct mlx5_priv *priv = &dev->priv; + bool delete_all; + int i; + + delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + + for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) { + bool is_supported = false; + + if (!priv->adev[i]) + continue; + + if (mlx5_adev_devices[i].is_supported && !delete_all) + is_supported = mlx5_adev_devices[i].is_supported(dev); + + if (is_supported) + continue; + + del_adev(&priv->adev[i]->adev); + priv->adev[i] = NULL; } - mutex_unlock(&mlx5_intf_mutex); } -/* Must be called with intf_mutex held */ -void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) +/* This function is used after mlx5_core_dev is reconfigured. + */ +int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev) { - struct mlx5_interface *intf; + struct mlx5_priv *priv = &dev->priv; - list_for_each_entry(intf, &intf_list, list) - if (intf->protocol == protocol) { - mlx5_add_device(intf, &dev->priv); - break; - } -} + lockdep_assert_held(&mlx5_intf_mutex); -/* Must be called with intf_mutex held */ -void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) -{ - struct mlx5_interface *intf; + delete_drivers(dev); + if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) + return 0; - list_for_each_entry(intf, &intf_list, list) - if (intf->protocol == protocol) { - mlx5_remove_device(intf, &dev->priv); - break; - } + return add_drivers(dev); } -static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev) +static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev) { return (u32)((pci_domain_nr(dev->pdev->bus) << 16) | (dev->pdev->bus->number << 8) | PCI_SLOT(dev->pdev->devfn)); } -/* Must be called with intf_mutex held */ +static int next_phys_dev(struct device *dev, const void *data) +{ + struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev); + struct mlx5_core_dev *mdev = madev->mdev; + const struct mlx5_core_dev *curr = data; + + if (!mlx5_core_is_pf(mdev)) + return 0; + + if (mdev == curr) + return 0; + + if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr)) + return 0; + + return 1; +} + +/* This function is called with two flows: + * 1. During initialization of mlx5_core_dev and we don't need to lock it. + * 2. During LAG configure stage and caller holds &mlx5_intf_mutex. 
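[Editor's note: mlx5_gen_pci_id() above keys physical-device pairing on PCI domain, bus and slot while masking out the function number, so the two PFs of one dual-port adapter compare equal and mlx5_get_next_phys_dev() can pair them for LAG. A standalone model, with PCI_SLOT() open-coded as (devfn >> 3) & 0x1f and made-up addresses:

/* User-space model of mlx5_gen_pci_id(); addresses are illustrative. */
#include <stdio.h>
#include <stdint.h>

static uint32_t gen_pci_id(uint32_t domain, uint8_t bus, uint8_t devfn)
{
	/* Function bits of devfn are deliberately dropped. */
	return (domain << 16) | ((uint32_t)bus << 8) | ((devfn >> 3) & 0x1f);
}

int main(void)
{
	/* 0000:3b:00.0 and 0000:3b:00.1 -> same id, LAG candidates */
	printf("%#x %#x\n", gen_pci_id(0, 0x3b, 0x0), gen_pci_id(0, 0x3b, 0x1));
	/* 0000:5e:00.0 -> different id, different physical device */
	printf("%#x\n", gen_pci_id(0, 0x5e, 0x0));
	return 0;
}

End of editor's note.]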
+ */ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) { - struct mlx5_core_dev *res = NULL; - struct mlx5_core_dev *tmp_dev; - struct mlx5_priv *priv; - u32 pci_id; + struct auxiliary_device *adev; + struct mlx5_adev *madev; if (!mlx5_core_is_pf(dev)) return NULL; - pci_id = mlx5_gen_pci_id(dev); - list_for_each_entry(priv, &mlx5_dev_list, dev_list) { - tmp_dev = container_of(priv, struct mlx5_core_dev, priv); - if (!mlx5_core_is_pf(tmp_dev)) - continue; - - if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) { - res = tmp_dev; - break; - } - } + adev = auxiliary_find_device(NULL, dev, &next_phys_dev); + if (!adev) + return NULL; - return res; + madev = container_of(adev, struct mlx5_adev, adev); + put_device(&adev->dev); + return madev->mdev; } - void mlx5_dev_list_lock(void) { mutex_lock(&mlx5_intf_mutex); } - void mlx5_dev_list_unlock(void) { mutex_unlock(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index a28f95df2901..3261d0dc1104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -13,17 +13,8 @@ static int mlx5_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - const struct firmware *fw; - int err; - - err = request_firmware_direct(&fw, params->file_name, &dev->pdev->dev); - if (err) - return err; - - err = mlx5_firmware_flash(dev, fw, extack); - release_firmware(fw); - return err; + return mlx5_firmware_flash(dev, params->fw, extack); } static u8 mlx5_fw_ver_major(u32 version) @@ -52,7 +43,7 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, u32 running_fw, stored_fw; int err; - err = devlink_info_driver_name_put(req, DRIVER_NAME); + err = devlink_info_driver_name_put(req, KBUILD_MODNAME); if (err) return err; @@ -221,7 +212,7 @@ static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id, u8 eswitch_mode; bool smfs_cap; - eswitch_mode = mlx5_eswitch_mode(dev->priv.eswitch); + eswitch_mode = mlx5_eswitch_mode(dev); smfs_cap = mlx5_fs_dr_is_supported(dev); if (!smfs_cap) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index a700f3c86899..87d65f6b5310 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -247,6 +247,9 @@ const char *parse_fs_dst(struct trace_seq *p, case MLX5_FLOW_DESTINATION_TYPE_TIR: trace_seq_printf(p, "tir=%u\n", dst->tir_num); break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + trace_seq_printf(p, "sampler_id=%u\n", dst->sampler_id); + break; case MLX5_FLOW_DESTINATION_TYPE_COUNTER: trace_seq_printf(p, "counter_id=%u\n", counter_id); break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c index 3dc9dd3f24dc..464eb3a18450 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -8,37 +8,66 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; } -static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) +static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev) { - u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; - int err; + /* In separate host mode, PF enables itself. 
+ * When ECPF is eswitch manager, eswitch enables host PF after + * eswitch is setup. + */ + return mlx5_core_is_ecpf_esw_manager(dev); +} + +int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); - err = mlx5_cmd_exec_in(dev, enable_hca, in); + MLX5_SET(enable_hca_in, in, function_id, 0); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; + + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, 0); + MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_host_pf_init(struct mlx5_core_dev *dev) +{ + int err; + + if (mlx5_ecpf_esw_admins_host_pf(dev)) + return 0; + + /* ECPF shall enable HCA for host PF in the same way a PF + * does this for its VFs when ECPF is not a eswitch manager. + */ + err = mlx5_cmd_host_pf_enable_hca(dev); if (err) - mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", - err); + mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err); return err; } -static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) +static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev) { - u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; int err; - MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); - err = mlx5_cmd_exec_in(dev, disable_hca, in); + if (mlx5_ecpf_esw_admins_host_pf(dev)) + return; + + err = mlx5_cmd_host_pf_disable_hca(dev); if (err) { - mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", - err); + mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err); return; } - - err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages); - if (err) - mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n", - err); } int mlx5_ec_init(struct mlx5_core_dev *dev) @@ -46,16 +75,19 @@ int mlx5_ec_init(struct mlx5_core_dev *dev) if (!mlx5_core_is_ecpf(dev)) return 0; - /* ECPF shall enable HCA for peer PF in the same way a PF - * does this for its VFs. 
- */ - return mlx5_peer_pf_init(dev); + return mlx5_host_pf_init(dev); } void mlx5_ec_cleanup(struct mlx5_core_dev *dev) { + int err; + if (!mlx5_core_is_ecpf(dev)) return; - mlx5_peer_pf_cleanup(dev); + mlx5_host_pf_cleanup(dev); + + err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages); + if (err) + mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h index d3d7a00a02ac..40b6ad76dca6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h @@ -17,6 +17,9 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev); int mlx5_ec_init(struct mlx5_core_dev *dev); void mlx5_ec_cleanup(struct mlx5_core_dev *dev); +int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev); +int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev); + #else /* CONFIG_MLX5_ESWITCH */ static inline bool diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2f05b0f9de01..a1a81cfeb607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -227,6 +227,7 @@ enum mlx5e_priv_flag { MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, MLX5E_PFLAG_XDP_TX_MPWQE, MLX5E_PFLAG_SKB_TX_MPWQE, + MLX5E_PFLAG_TX_PORT_TS, MLX5E_NUM_PFLAGS, /* Keep last */ }; @@ -282,10 +283,12 @@ struct mlx5e_cq { u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; - struct mlx5e_channel *channel; + struct mlx5e_ch_stats *ch_stats; /* control */ + struct net_device *netdev; struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; @@ -329,6 +332,15 @@ struct mlx5e_tx_mpwqe { u8 inline_on; }; +struct mlx5e_skb_fifo { + struct sk_buff **fifo; + u16 *pc; + u16 *cc; + u16 mask; +}; + +struct mlx5e_ptpsq; + struct mlx5e_txqsq { /* data path */ @@ -349,11 +361,10 @@ struct mlx5e_txqsq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; - u16 skb_fifo_mask; struct mlx5e_sq_stats *stats; struct { struct mlx5e_sq_dma *dma_fifo; - struct sk_buff **skb_fifo; + struct mlx5e_skb_fifo skb_fifo; struct mlx5e_tx_wqe_info *wqe_info; } db; void __iomem *uar_map; @@ -367,14 +378,17 @@ struct mlx5e_txqsq { unsigned int hw_mtu; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; + struct net_device *netdev; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; /* control path */ struct mlx5_wq_ctrl wq_ctrl; - struct mlx5e_channel *channel; int ch_ix; int txq_ix; u32 rate_limit; struct work_struct recover_work; + struct mlx5e_ptpsq *ptpsq; } ____cacheline_aligned_in_smp; struct mlx5e_dma_info { @@ -593,7 +607,6 @@ struct mlx5e_rq { u8 map_dir; /* dma map direction */ } buff; - struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; struct mlx5e_rq_stats *stats; @@ -602,6 +615,8 @@ struct mlx5e_rq { struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; + struct mlx5e_icosq *icosq; + struct mlx5e_priv *priv; mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_post_rx_wqes post_wqes; @@ -681,8 +696,11 @@ struct mlx5e_channel { int cpu; }; +struct mlx5e_port_ptp; + struct mlx5e_channels { struct mlx5e_channel **c; + struct mlx5e_port_ptp *port_ptp; unsigned int num; struct mlx5e_params params; }; @@ -697,6 +715,12 @@ struct mlx5e_channel_stats { struct mlx5e_xdpsq_stats xsksq; } ____cacheline_aligned_in_smp; +struct mlx5e_port_ptp_stats { + 
struct mlx5e_ch_stats ch; + struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC]; +} ____cacheline_aligned_in_smp; + enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, @@ -766,8 +790,10 @@ struct mlx5e_scratchpad { struct mlx5e_priv { /* priv data path fields - start */ - struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; + /* +1 for port ptp ts */ + struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC]; int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; + int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC]; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -802,12 +828,15 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_port_ptp_stats port_ptp_stats; u16 max_nch; u8 max_opened_tc; + bool port_ptp_opened; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; struct notifier_block events_nb; + int num_tc_x_num_ch; struct udp_tunnel_nic_info nic_info; #ifdef CONFIG_MLX5_CORE_EN_DCB @@ -923,9 +952,17 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_xdpsq *sq, bool is_redirect); void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq); +struct mlx5e_create_cq_param { + struct napi_struct *napi; + struct mlx5e_ch_stats *ch_stats; + int node; + int ix; +}; + struct mlx5e_cq_param; -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, - struct mlx5e_cq_param *param, struct mlx5e_cq *cq); +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, + struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, + struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq); int mlx5e_open_locked(struct net_device *netdev); @@ -974,7 +1011,17 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, struct mlx5e_modify_sq_param *p); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); void mlx5e_tx_disable_queue(struct netdev_queue *txq); +int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa); +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq); +struct mlx5e_create_sq_param; +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn); +void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index dc744702aee4..5749557749b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -287,8 +287,7 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type); -bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev); +u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt); #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 69a05da0e3e3..718f8c0a4f6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -37,13 +37,12 @@ int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = cq->channel->priv; u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; u8 hw_status; void *cqc; int err; - err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out); + err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out); if (err) return err; @@ -158,10 +157,8 @@ void mlx5e_health_channels_update(struct mlx5e_priv *priv) DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); } -int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn) +int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn) { - struct mlx5_core_dev *mdev = channel->mdev; - struct net_device *dev = channel->netdev; struct mlx5e_modify_sq_param msp = {}; int err; @@ -206,21 +203,22 @@ out: return err; } -int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel) +int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq, + struct mlx5e_ch_stats *stats) { u32 eqe_count; - netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", eq->core.eqn, eq->core.cons_index, eq->core.irqn); eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) return -EIO; - netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n", + netdev_err(dev, "Recovered %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn); - channel->stats->eq_rearm++; + stats->eq_rearm++; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index b9aadddfd000..018262d0164b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -7,8 +7,6 @@ #include "en.h" #include "diag/rsc_dump.h" -#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) - static inline bool cqe_syndrome_needs_recover(u8 syndrome) { return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR || @@ -42,8 +40,9 @@ struct mlx5e_err_ctx { void *ctx; }; -int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn); -int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel); +int mlx5e_health_sq_to_ready(struct mlx5_core_dev *mdev, struct net_device *dev, u32 sqn); +int mlx5e_health_channel_eq_recover(struct net_device *dev, struct mlx5_eq_comp *eq, + struct mlx5e_ch_stats *stats); int mlx5e_health_recover_channels(struct mlx5e_priv *priv); int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 38e4f19d69f8..43271a3856ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -2,6 +2,8 @@ /* Copyright (c) 2019 Mellanox Technologies. */ #include "en/params.h" +#include "en/txrx.h" +#include "en_accel/tls_rxtx.h" static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) @@ -152,3 +154,35 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, return is_linear_skb ? 
mlx5e_get_linear_rq_headroom(params, xsk) : 0; } + +u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) +{ + bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); + u16 stop_room; + + stop_room = mlx5e_tls_get_stop_room(mdev, params); + stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + if (is_mpwqe) + /* A MPWQE can take up to the maximum-sized WQE + all the normal + * stop room can be taken if a new packet breaks the active + * MPWQE session and allocates its WQEs right away. + */ + stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + + return stop_room; +} + +int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params) +{ + size_t sq_size = 1 << params->log_sq_size; + u16 stop_room; + + stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); + if (stop_room >= sq_size) { + netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n", + stop_room, sq_size); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index a87273e801b2..807147d97a0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -30,6 +30,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; bool is_mpw; + u16 stop_room; }; struct mlx5e_channel_param { @@ -40,6 +41,15 @@ struct mlx5e_channel_param { struct mlx5e_sq_param async_icosq; }; +struct mlx5e_create_sq_param { + struct mlx5_wq_ctrl *wq_ctrl; + u32 cqn; + u32 ts_cqe_to_dest_cqn; + u32 tisn; + u8 tis_lst_sz; + u8 min_inline_mode; +}; + static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params, u16 qid, enum mlx5e_rq_group group, @@ -101,6 +111,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, /* Build queue parameters */ +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c); void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, @@ -124,4 +135,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_sq_param *param); +u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params); + #endif /* __MLX5_EN_PARAMS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c new file mode 100644 index 000000000000..351118985a57 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2020 Mellanox Technologies + +#include "en/ptp.h" +#include "en/txrx.h" +#include "lib/clock.h" + +struct mlx5e_skb_cb_hwtstamp { + ktime_t cqe_hwtstamp; + ktime_t port_hwtstamp; +}; + +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb) +{ + memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); +} + +static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb)); + return (struct mlx5e_skb_cb_hwtstamp *)skb->cb; +} + +static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb, + struct mlx5e_ptp_cq_stats *cq_stats) +{ + struct skb_shared_hwtstamps hwts = {}; + ktime_t diff; + + diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp - + 
mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp); + + /* Maximal allowed diff is 1 / 128 second */ + if (diff > (NSEC_PER_SEC >> 7)) { + cq_stats->abort++; + cq_stats->abort_abs_diff_ns += diff; + return; + } + + hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp; + skb_tstamp_tx(skb, &hwts); +} + +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, + ktime_t hwtstamp, + struct mlx5e_ptp_cq_stats *cq_stats) +{ + switch (hwtstamp_type) { + case (MLX5E_SKB_CB_CQE_HWTSTAMP): + mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp; + break; + case (MLX5E_SKB_CB_PORT_HWTSTAMP): + mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp; + break; + } + + /* If both CQEs arrive, check and report the port tstamp, and clear skb cb as + * skb soon to be released. + */ + if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp || + !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp) + return; + + mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats); + memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp)); +} + +static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, + struct mlx5_cqe64 *cqe, + int budget) +{ + struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); + ktime_t hwtstamp; + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + ptpsq->cq_stats->err_cqe++; + goto out; + } + + hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe)); + mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP, + hwtstamp, ptpsq->cq_stats); + ptpsq->cq_stats->cqe++; + +out: + napi_consume_skb(skb, budget); +} + +static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget) +{ + struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq); + struct mlx5_cqwq *cqwq = &cq->wq; + struct mlx5_cqe64 *cqe; + int work_done = 0; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state))) + return false; + + cqe = mlx5_cqwq_get_cqe(cqwq); + if (!cqe) + return false; + + do { + mlx5_cqwq_pop(cqwq); + + mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget); + } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); + + mlx5_cqwq_update_db_record(cqwq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + return work_done == budget; +} + +static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp, + napi); + struct mlx5e_ch_stats *ch_stats = c->stats; + bool busy = false; + int work_done = 0; + int i; + + rcu_read_lock(); + + ch_stats->poll++; + + for (i = 0; i < c->num_tc; i++) { + busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget); + busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget); + } + + if (busy) { + work_done = budget; + goto out; + } + + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; + + ch_stats->arm++; + + for (i = 0; i < c->num_tc; i++) { + mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq); + mlx5e_cq_arm(&c->ptpsq[i].ts_cq); + } + +out: + rcu_read_unlock(); + + return work_done; +} + +static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix, + struct mlx5e_params *params, + struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq, int tc, + struct mlx5e_ptpsq *ptpsq) +{ + void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); + struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; + int err; + int node; + + sq->pdev = c->pdev; + sq->tstamp = c->tstamp; + sq->clock = &mdev->clock; + sq->mkey_be = c->mkey_be; + sq->netdev = c->netdev; + sq->priv = c->priv; + sq->mdev = mdev; + sq->ch_ix = c->ix; + sq->txq_ix = txq_ix; + 
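[Editor's note: mlx5e_skb_cb_hwtstamp_handler() above reports a TX timestamp to the stack only after both completions, the ordinary CQE timestamp and the dedicated port timestamp, have been recorded in skb->cb, and discards the pair when they diverge by more than NSEC_PER_SEC >> 7 (1/128 s). A user-space model of that small state machine, with illustrative values; a zero timestamp means "not arrived yet", as in the kernel code:

/* Sketch of the two-completion TX timestamp correlation. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000LL

struct hwts {
	int64_t cqe_hwtstamp;	/* from the ordinary send completion */
	int64_t port_hwtstamp;	/* from the dedicated ts_cq completion */
};

static void hwtstamp_handler(struct hwts *ts, int is_port, int64_t stamp)
{
	if (is_port)
		ts->port_hwtstamp = stamp;
	else
		ts->cqe_hwtstamp = stamp;

	/* Report only once both completions have been seen. */
	if (!ts->cqe_hwtstamp || !ts->port_hwtstamp)
		return;

	if (llabs(ts->port_hwtstamp - ts->cqe_hwtstamp) > (NSEC_PER_SEC >> 7))
		printf("abort: port/CQE timestamps disagree by > 1/128 s\n");
	else
		printf("report port hwtstamp %lld ns\n",
		       (long long)ts->port_hwtstamp);

	ts->cqe_hwtstamp = ts->port_hwtstamp = 0;	/* clear, skb released */
}

int main(void)
{
	struct hwts ts = { 0, 0 };

	hwtstamp_handler(&ts, 0, 1000000);	/* CQE completion arrives first */
	hwtstamp_handler(&ts, 1, 1200000);	/* port ts arrives: reported */
	return 0;
}

End of editor's note.]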
sq->uar_map = mdev->mlx5e_res.bfreg.map; + sq->min_inline_mode = params->tx_min_inline_mode; + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + sq->stats = &c->priv->port_ptp_stats.sq[tc]; + sq->ptpsq = ptpsq; + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); + if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); + sq->stop_room = param->stop_room; + + node = dev_to_node(mlx5_core_dma_dev(mdev)); + + param->wq.db_numa_node = node; + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); + if (err) + return err; + wq->db = &wq->db[MLX5_SND_DBR]; + + err = mlx5e_alloc_txqsq_db(sq, node); + if (err) + goto err_sq_wq_destroy; + + return 0; + +err_sq_wq_destroy: + mlx5_wq_destroy(&sq->wq_ctrl); + + return err; +} + +static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) +{ + mlx5_core_destroy_sq(mdev, sqn); +} + +static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa) +{ + int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq); + + ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)), + GFP_KERNEL, numa); + if (!ptpsq->skb_fifo.fifo) + return -ENOMEM; + + ptpsq->skb_fifo.pc = &ptpsq->skb_fifo_pc; + ptpsq->skb_fifo.cc = &ptpsq->skb_fifo_cc; + ptpsq->skb_fifo.mask = wq_sz - 1; + + return 0; +} + +static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo) +{ + while (*skb_fifo->pc != *skb_fifo->cc) { + struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo); + + dev_kfree_skb_any(skb); + } +} + +static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo) +{ + mlx5e_ptp_drain_skb_fifo(skb_fifo); + kvfree(skb_fifo->fifo); +} + +static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn, + int txq_ix, struct mlx5e_ptp_params *cparams, + int tc, struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_sq_param *sqp = &cparams->txq_sq_param; + struct mlx5e_txqsq *txqsq = &ptpsq->txqsq; + struct mlx5e_create_sq_param csp = {}; + int err; + + err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp, + txqsq, tc, ptpsq); + if (err) + return err; + + csp.tisn = tisn; + csp.tis_lst_sz = 1; + csp.cqn = txqsq->cq.mcq.cqn; + csp.wq_ctrl = &txqsq->wq_ctrl; + csp.min_inline_mode = txqsq->min_inline_mode; + csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn; + + err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn); + if (err) + goto err_free_txqsq; + + err = mlx5e_ptp_alloc_traffic_db(ptpsq, + dev_to_node(mlx5_core_dma_dev(c->mdev))); + if (err) + goto err_free_txqsq; + + return 0; + +err_free_txqsq: + mlx5e_free_txqsq(txqsq); + + return err; +} + +static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq) +{ + struct mlx5e_txqsq *sq = &ptpsq->txqsq; + struct mlx5_core_dev *mdev = sq->mdev; + + mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo); + cancel_work_sync(&sq->recover_work); + mlx5e_ptp_destroy_sq(mdev, sq->sqn); + mlx5e_free_txqsq_descs(sq); + mlx5e_free_txqsq(sq); +} + +static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + struct mlx5e_params *params = &cparams->params; + int ix_base; + int err; + int tc; + + ix_base = params->num_tc * params->num_channels; + + for (tc = 0; tc < params->num_tc; tc++) { + int txq_ix = ix_base + tc; + + err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, + cparams, tc, &c->ptpsq[tc]); + if (err) + goto close_txqsq; + } + + return 0; + +close_txqsq: + for (--tc; tc >= 0; tc--) + mlx5e_ptp_close_txqsq(&c->ptpsq[tc]); + + return err; +} + +static void 
mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_ptp_close_txqsq(&c->ptpsq[tc]); +} + +static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + struct mlx5e_params *params = &cparams->params; + struct mlx5e_create_cq_param ccp = {}; + struct dim_cq_moder ptp_moder = {}; + struct mlx5e_cq_param *cq_param; + int err; + int tc; + + ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); + ccp.ch_stats = c->stats; + ccp.napi = &c->napi; + ccp.ix = c->ix; + + cq_param = &cparams->txq_sq_param.cqp; + + for (tc = 0; tc < params->num_tc; tc++) { + struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; + + err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + if (err) + goto out_err_txqsq_cq; + } + + for (tc = 0; tc < params->num_tc; tc++) { + struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; + struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; + + err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); + if (err) + goto out_err_ts_cq; + + ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc]; + } + + return 0; + +out_err_ts_cq: + for (--tc; tc >= 0; tc--) + mlx5e_close_cq(&c->ptpsq[tc].ts_cq); + tc = params->num_tc; +out_err_txqsq_cq: + for (--tc; tc >= 0; tc--) + mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); + + return err; +} + +static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->ptpsq[tc].ts_cq); + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); +} + +static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_params *params, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq; + + mlx5e_build_sq_param_common(priv, param); + + wq = MLX5_ADDR_OF(sqc, sqc, wq); + MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); + param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); +} + +static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams, + struct mlx5e_params *orig) +{ + struct mlx5e_params *params = &cparams->params; + + params->tx_min_inline_mode = orig->tx_min_inline_mode; + params->num_channels = orig->num_channels; + params->hard_mtu = orig->hard_mtu; + params->sw_mtu = orig->sw_mtu; + params->num_tc = orig->num_tc; + + /* SQ */ + params->log_sq_size = orig->log_sq_size; + + mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param); +} + +static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c, + struct mlx5e_ptp_params *cparams) +{ + int err; + + err = mlx5e_ptp_open_cqs(c, cparams); + if (err) + return err; + + napi_enable(&c->napi); + + err = mlx5e_ptp_open_txqsqs(c, cparams); + if (err) + goto disable_napi; + + return 0; + +disable_napi: + napi_disable(&c->napi); + mlx5e_ptp_close_cqs(c); + + return err; +} + +static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c) +{ + mlx5e_ptp_close_txqsqs(c); + napi_disable(&c->napi); + mlx5e_ptp_close_cqs(c); +} + +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_port_ptp **cp) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_ptp_params *cparams; + struct mlx5e_port_ptp *c; + unsigned int irq; + int err; + int eqn; + + err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq); + if (err) + return err; + + c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev))); + cparams = 
kvzalloc(sizeof(*cparams), GFP_KERNEL); + if (!c || !cparams) + return -ENOMEM; + + c->priv = priv; + c->mdev = priv->mdev; + c->tstamp = &priv->tstamp; + c->ix = 0; + c->pdev = mlx5_core_dma_dev(priv->mdev); + c->netdev = priv->netdev; + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + c->num_tc = params->num_tc; + c->stats = &priv->port_ptp_stats.ch; + c->irq_desc = irq_to_desc(irq); + c->lag_port = lag_port; + + netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64); + + mlx5e_ptp_build_params(c, cparams, params); + + err = mlx5e_ptp_open_queues(c, cparams); + if (unlikely(err)) + goto err_napi_del; + + *cp = c; + + kvfree(cparams); + + return 0; + +err_napi_del: + netif_napi_del(&c->napi); + + kvfree(cparams); + kvfree(c); + return err; +} + +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c) +{ + mlx5e_ptp_close_queues(c); + netif_napi_del(&c->napi); + + kvfree(c); +} + +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq); +} + +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c) +{ + int tc; + + for (tc = 0; tc < c->num_tc; tc++) + mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h new file mode 100644 index 000000000000..28aa5ae118f4 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. */ + +#ifndef __MLX5_EN_PTP_H__ +#define __MLX5_EN_PTP_H__ + +#include "en.h" +#include "en/params.h" +#include "en_stats.h" + +struct mlx5e_ptpsq { + struct mlx5e_txqsq txqsq; + struct mlx5e_cq ts_cq; + u16 skb_fifo_cc; + u16 skb_fifo_pc; + struct mlx5e_skb_fifo skb_fifo; + struct mlx5e_ptp_cq_stats *cq_stats; +}; + +struct mlx5e_port_ptp { + /* data path */ + struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC]; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + u8 num_tc; + u8 lag_port; + + /* data path - accessed per napi poll */ + struct irq_desc *irq_desc; + struct mlx5e_ch_stats *stats; + + /* control */ + struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; + struct hwtstamp_config *tstamp; + DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES); + int ix; +}; + +struct mlx5e_ptp_params { + struct mlx5e_params params; + struct mlx5e_sq_param txq_sq_param; +}; + +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, + u8 lag_port, struct mlx5e_port_ptp **cp); +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c); +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c); +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c); + +enum { + MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0), + MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1), +}; + +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type, + ktime_t hwtstamp, + struct mlx5e_ptp_cq_stats *cq_stats); + +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb); +#endif /* __MLX5_EN_PTP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 9913647a1faf..d80bbd17e5f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -87,7 +87,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx) /* At this point, both the rq and the icosq are disabled */ - 
err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn); + err = mlx5e_health_sq_to_ready(mdev, dev, icosq->sqn); if (err) goto out; @@ -146,17 +146,16 @@ out: static int mlx5e_rx_reporter_timeout_recover(void *ctx) { - struct mlx5e_icosq *icosq; struct mlx5_eq_comp *eq; struct mlx5e_rq *rq; int err; rq = ctx; - icosq = &rq->channel->icosq; eq = rq->cq.mcq.eq; - err = mlx5e_health_channel_eq_recover(eq, rq->channel); - if (err) - clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); + + err = mlx5e_health_channel_eq_recover(rq->netdev, eq, rq->cq.ch_stats); + if (err && rq->icosq) + clear_bit(MLX5E_SQ_STATE_ENABLED, &rq->icosq->state); return err; } @@ -233,21 +232,13 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = rq->channel->priv; - struct mlx5e_icosq *icosq; - u8 icosq_hw_state; u16 wqe_counter; int wqes_sz; u8 hw_state; u16 wq_head; int err; - icosq = &rq->channel->icosq; - err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state); - if (err) - return err; - - err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state); + err = mlx5e_query_rq_state(rq->mdev, rq->rqn, &hw_state); if (err) return err; @@ -259,7 +250,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix); + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix); if (err) return err; @@ -295,9 +286,18 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); - if (err) - return err; + if (rq->icosq) { + struct mlx5e_icosq *icosq = rq->icosq; + u8 icosq_hw_state; + + err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state); + if (err) + return err; + + err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); + if (err) + return err; + } err = devlink_fmsg_obj_nest_end(fmsg); if (err) @@ -557,25 +557,29 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq) { - struct mlx5e_icosq *icosq = &rq->channel->icosq; - struct mlx5e_priv *priv = rq->channel->priv; + char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {}; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_icosq *icosq = rq->icosq; + struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = rq; err_ctx.recover = mlx5e_rx_reporter_timeout_recover; err_ctx.dump = mlx5e_rx_reporter_dump_rq; + + if (icosq) + snprintf(icosq_str, sizeof(icosq_str), "ICOSQ: 0x%x, ", icosq->sqn); snprintf(err_str, sizeof(err_str), - "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x", - icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); + "RX timeout on channel: %d, %sRQ: 0x%x, CQ: 0x%x", + rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn); mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx); } void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq) { - struct mlx5e_priv *priv = rq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_priv *priv = rq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 8be6eaa3eeb1..d7275c84313e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 Mellanox Technologies. */ #include "health.h" +#include "en/ptp.h" static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { @@ -15,7 +16,7 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) msleep(20); } - netdev_err(sq->channel->netdev, + netdev_err(sq->netdev, "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", sq->sqn, sq->cc, sq->pc); @@ -41,8 +42,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) int err; sq = ctx; - mdev = sq->channel->mdev; - dev = sq->channel->netdev; + mdev = sq->mdev; + dev = sq->netdev; if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) return 0; @@ -68,7 +69,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) * pending WQEs. SQ can safely reset the SQ. */ - err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn); + err = mlx5e_health_sq_to_ready(mdev, dev, sq->sqn); if (err) goto out; @@ -99,8 +100,8 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx) to_ctx = ctx; sq = to_ctx->sq; eq = sq->cq.mcq.eq; - priv = sq->channel->priv; - err = mlx5e_health_channel_eq_recover(eq, sq->channel); + priv = sq->priv; + err = mlx5e_health_channel_eq_recover(sq->netdev, eq, sq->cq.ch_stats); if (!err) { to_ctx->status = 0; /* this sq recovered */ return err; @@ -141,11 +142,11 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, } static int -mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, - struct mlx5e_txqsq *sq, int tc) +mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *sq, int tc) { - struct mlx5e_priv *priv = sq->channel->priv; bool stopped = netif_xmit_stopped(sq->txq); + struct mlx5e_priv *priv = sq->priv; u8 state; int err; @@ -153,14 +154,6 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; - err = devlink_fmsg_obj_nest_start(fmsg); - if (err) - return err; - - err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); - if (err) - return err; - err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc); if (err) return err; @@ -193,7 +186,24 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; - err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); + return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); +} + +static int +mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *sq, int tc) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix); + if (err) + return err; + + err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc); if (err) return err; @@ -204,49 +214,147 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, return 0; } -static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg, - struct netlink_ext_ack *extack) +static int +mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg, + struct mlx5e_ptpsq *ptpsq, int tc) { - struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; - u32 sq_stride, sq_sz; + int err; - int i, tc, err = 0; + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; - mutex_lock(&priv->state_lock); + err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp"); + if 
(err) + return err; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; + err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc); + if (err) + return err; - sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); - sq_stride = MLX5_SEND_WQE_BB; + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + if (err) + return err; - err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg); if (err) - goto unlock; + return err; + + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int +mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg, + struct mlx5e_txqsq *txqsq) +{ + u32 sq_stride, sq_sz; + int err; err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) - goto unlock; + return err; + + sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq); + sq_stride = MLX5_SEND_WQE_BB; err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride); if (err) - goto unlock; + return err; err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz); if (err) - goto unlock; + return err; - err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg); + err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg); if (err) - goto unlock; + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg, + struct mlx5e_ptpsq *ptpsq) +{ + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS"); + if (err) + return err; + + err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int +mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_txqsq *generic_sq = priv->txq2sq[0]; + struct mlx5e_ptpsq *generic_ptpsq; + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq); + if (err) + return err; + + generic_ptpsq = priv->channels.port_ptp ? 
+ &priv->channels.port_ptp->ptpsq[0] : + NULL; + if (!generic_ptpsq) + goto out; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP"); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq); + if (err) + return err; + + err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq); + if (err) + return err; err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) + return err; + +out: + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; + + int i, tc, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg); if (err) goto unlock; @@ -265,6 +373,19 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, goto unlock; } } + + if (!ptp_ch) + goto close_sqs_nest; + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, + &ptp_ch->ptpsq[tc], + tc); + if (err) + goto unlock; + } + +close_sqs_nest: err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) goto unlock; @@ -338,6 +459,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { + struct mlx5e_port_ptp *ptp_ch = priv->channels.port_ptp; struct mlx5_rsc_key key = {}; int i, tc, err; @@ -373,6 +495,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, return err; } } + + if (ptp_ch) { + for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; + + err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); + if (err) + return err; + } + } + return devlink_fmsg_arr_pair_nest_end(fmsg); } @@ -396,8 +529,8 @@ static int mlx5e_tx_reporter_dump(struct devlink_health_reporter *reporter, void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = sq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_priv *priv = sq->priv; struct mlx5e_err_ctx err_ctx = {}; err_ctx.ctx = sq; @@ -410,9 +543,9 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq) int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) { - struct mlx5e_priv *priv = sq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; struct mlx5e_tx_timeout_ctx to_ctx = {}; + struct mlx5e_priv *priv = sq->priv; struct mlx5e_err_ctx err_ctx = {}; to_ctx.sq = sq; @@ -421,7 +554,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) err_ctx.dump = mlx5e_tx_reporter_dump_sq; snprintf(err_str, sizeof(err_str), "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u", - sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - sq->txq->trans_start)); mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 07ee1d236ab3..7943eb30b837 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -24,6 +24,8 @@ #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) + enum mlx5e_icosq_wqe_type { MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_UMR_RX, @@ -250,21 +252,24 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size, dma->type = map_type; } -static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i) +static inline +struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_skb_fifo *fifo, u16 i) { - return &sq->db.skb_fifo[i & sq->skb_fifo_mask]; + return &fifo->fifo[i & fifo->mask]; } -static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb) +static inline +void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb) { - struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++); + struct sk_buff **skb_item = mlx5e_skb_fifo_get(fifo, (*fifo->pc)++); *skb_item = skb; } -static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq) +static inline +struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo) { - return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++); + return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++); } static inline void @@ -308,7 +313,7 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); - netdev_err(cq->channel->netdev, + netdev_err(cq->netdev, "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", cq->mcq.cqn, ci, qn, get_cqe_opcode((struct mlx5_cqe64 *)err_cqe), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index ae90d533a350..2e3e78b0f333 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_wqe_info *wi, u32 *xsk_frames, - bool recycle) + bool recycle, + struct xdp_frame_bulk *bq) { struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; u16 i; @@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, /* XDP_TX from the XSK RQ and XDP_REDIRECT */ dma_unmap_single(sq->pdev, xdpi.frame.dma_addr, xdpi.frame.xdpf->len, DMA_TO_DEVICE); - xdp_return_frame(xdpi.frame.xdpf); + xdp_return_frame_bulk(xdpi.frame.xdpf, bq); break; case MLX5E_XDP_XMIT_MODE_PAGE: /* XDP_TX from the regular RQ */ @@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq, bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) { + struct xdp_frame_bulk bq; struct mlx5e_xdpsq *sq; struct mlx5_cqe64 *cqe; u32 xsk_frames = 0; u16 sqcc; int i; + xdp_frame_bulk_init(&bq); + sq = container_of(cq, struct mlx5e_xdpsq, cq); if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) @@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) sqcc += wi->num_wqebbs; - mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true); + mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq); } while (!last_wqe); if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { @@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) } } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + xdp_flush_frame_bulk(&bq); + if (xsk_frames) xsk_tx_completed(sq->xsk_pool, xsk_frames); @@ -463,8 +469,13 @@ bool 
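[Editorial note] The skb FIFO rework in the txrx.h hunk above makes the FIFO a standalone structure whose producer/consumer counters live outside it. The underlying idiom is a power-of-two ring indexed by free-running counters: the counters only ever increment, and the mask (size - 1) wraps them into the slot array. A generic sketch of that idiom (illustrative types, not the mlx5e structs):

	#include <stdint.h>

	struct ring {
		void **slots;	/* array of (mask + 1) entries, power-of-two sized */
		uint16_t mask;	/* size - 1 */
		uint16_t pc;	/* producer counter, free-running */
		uint16_t cc;	/* consumer counter, free-running */
	};

	static void **ring_slot(struct ring *r, uint16_t i)
	{
		return &r->slots[i & r->mask];	/* wrap via mask, as in mlx5e_skb_fifo_get() */
	}

	static void ring_push(struct ring *r, void *item)
	{
		*ring_slot(r, r->pc++) = item;
	}

	static void *ring_pop(struct ring *r)
	{
		return *ring_slot(r, r->cc++);	/* caller guarantees the ring is non-empty */
	}

Because the counters are free-running, (pc - cc) gives the fill level even across u16 wraparound, as long as the ring never holds more than 65536 entries.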
mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) { + struct xdp_frame_bulk bq; u32 xsk_frames = 0; + xdp_frame_bulk_init(&bq); + + rcu_read_lock(); /* need for xdp_return_frame_bulk */ + while (sq->cc != sq->pc) { struct mlx5e_xdp_wqe_info *wi; u16 ci; @@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) sq->cc += wi->num_wqebbs; - mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false); + mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq); } + xdp_flush_frame_bulk(&bq); + rcu_read_unlock(); + if (xsk_frames) xsk_tx_completed(sq->xsk_pool, xsk_frames); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index be3465ba38ca..d87c345878d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -49,8 +49,11 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel *c) { struct mlx5e_channel_param *cparam; + struct mlx5e_create_cq_param ccp; int err; + mlx5e_build_create_cq_param(&ccp, c); + if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) return -EINVAL; @@ -60,7 +63,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, mlx5e_build_xsk_cparam(priv, params, xsk, cparam); - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq); + err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + &c->xskrq.cq); if (unlikely(err)) goto err_free_cparam; @@ -68,7 +72,8 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index b140e13fdcc8..d16def68ecff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -13,20 +13,20 @@ struct mlx5e_dump_wqe { (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) static u8 -mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags, +mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags, unsigned int sync_len) { /* Given the MTU and sync_len, calculates an upper bound for the * number of DUMP WQEs needed for the TX resync of a record. 
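[Editorial note] The comment above boils down to one ceil-division: the worst case is one DUMP WQE per fragment plus one per hw_mtu-sized chunk of the bytes that must be replayed. A worked check of the bound (DIV_ROUND_UP as in the kernel; the nfrags = 17 and sync_len = 16384 figures are typical values of MAX_SKB_FRAGS and TLS_MAX_PAYLOAD_SIZE, not taken from this diff):

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Upper bound on DUMP WQEs for one TX resync, mirroring
	 * mlx5e_ktls_dumps_num_wqes(): nfrags + ceil(sync_len / hw_mtu).
	 */
	static unsigned int dumps_num_wqes(unsigned int nfrags,
					   unsigned int sync_len,
					   unsigned int hw_mtu)
	{
		return nfrags + DIV_ROUND_UP(sync_len, hw_mtu);
	}

	/* e.g. dumps_num_wqes(17, 16384, 1500) = 17 + 11 = 28 WQEs */

The hunk itself only swaps the MTU source: sq->hw_mtu becomes MLX5E_SW2HW_MTU(params, params->sw_mtu), so the bound can be computed from params alone, before any SQ object exists.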
*/ - return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu); + return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu)); } -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq) +u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params) { u16 num_dumps, stop_room = 0; - num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); + num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS); stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 7521c9be735b..ee04e916fa21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state { u32 tls_tisn; }; -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); +u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params); bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, struct sk_buff *skb, int datalen, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 6982b193ee8a..2b51d3222ca1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -276,7 +276,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; - if (mlx5_accel_is_ktls_tx(sq->channel->mdev)) + if (mlx5_accel_is_ktls_tx(sq->mdev)) return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); /* FPGA */ @@ -385,15 +385,13 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; } -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - struct mlx5_core_dev *mdev = sq->channel->mdev; - if (!mlx5_accel_is_tls_device(mdev)) return 0; if (mlx5_accel_is_ktls_device(mdev)) - return mlx5e_ktls_get_stop_room(sq); + return mlx5e_ktls_get_stop_room(params); /* FPGA */ /* Resync SKB. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 5f162ad2ee8f..9923132c9440 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -43,7 +43,7 @@ #include "en.h" #include "en/txrx.h" -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq); +u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params); bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); @@ -71,7 +71,7 @@ mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; static inline void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} -static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d25a56ec6876..d9076d543104 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -32,6 +32,7 @@ #include "en.h" #include "en/port.h" +#include "en/params.h" #include "en/xsk/pool.h" #include "lib/clock.h" @@ -40,9 +41,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, { struct mlx5_core_dev *mdev = priv->mdev; - strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, DRIVER_VERSION, - sizeof(drvinfo->version)); + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), @@ -369,6 +368,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, new_channels.params.log_rq_mtu_frames = log_rq_size; new_channels.params.log_sq_size = log_sq_size; + err = mlx5e_validate_params(priv, &new_channels.params); + if (err) + goto unlock; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; goto unlock; @@ -1941,6 +1944,38 @@ static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable) return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable); } +static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) + return -EOPNOTSUPP; + + new_channels.params = priv->channels.params; + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable); + /* No need to verify SQ stop room as + * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both + * has the same log_sq_size. 
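[Editorial note] For readers new to the term: stop_room is the number of WQEBBs an SQ keeps in reserve, stopping the txq once fewer than that many slots remain so that a maximal multi-WQEBB descriptor can always be posted without overwriting unconsumed entries. The handler can skip re-validation because the inequality it cites holds by construction, e.g. (illustrative numbers only):

	/* Both SQs share log_sq_size, so validating the generic SQ suffices:
	 *   sq_size = 1 << log_sq_size;          e.g. 1 << 10 = 1024 WQEBBs
	 *   ptpsq.txqsq.stop_room <= generic_sq->stop_room < sq_size
	 */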
+ */ + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + err = mlx5e_num_channels_changed(priv); + goto out; + } + + err = mlx5e_safe_switch_channels(priv, &new_channels, + mlx5e_num_channels_changed_ctx, NULL); +out: + if (!err) + priv->port_ptp_opened = true; + + return err; +} + static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_cqe_moder", set_pflag_rx_cqe_based_moder }, { "tx_cqe_moder", set_pflag_tx_cqe_based_moder }, @@ -1949,6 +1984,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe }, { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe }, + { "tx_port_ts", set_pflag_tx_port_ts }, }; static int mlx5e_handle_pflag(struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 1f48f99c0997..fa8149f6eb08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -772,25 +772,31 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = { }; -bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type) +u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt) +{ + return ttc_tunnel_rules[tt].proto; +} + +static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type) { switch (proto_type) { case IPPROTO_GRE: return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); case IPPROTO_IPIP: case IPPROTO_IPV6: - return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip); + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); default: return false; } } -bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) +static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) { int tt; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto)) + if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto)) return true; } return false; @@ -798,7 +804,7 @@ bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev) bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { - return (mlx5e_any_tunnel_proto_supported(mdev) && + return (mlx5e_tunnel_any_rx_proto_supported(mdev) && MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); } @@ -899,8 +905,8 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (!mlx5e_tunnel_proto_supported(priv->mdev, - ttc_tunnel_rules[tt].proto)) + if (!mlx5e_tunnel_proto_supported_rx(priv->mdev, + ttc_tunnel_rules[tt].proto)) continue; trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_tunnel_rules[tt].etype, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ebce97921e03..03831650f655 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -64,6 +64,7 @@ #include "en/hv_vhca_stats.h" #include "en/devlink.h" #include "lib/mlx5.h" +#include "en/ptp.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -412,9 +413,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; 
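[Editorial note] The surrounding en_main.c hunks carry the core of the refactor: mlx5e_rq and mlx5e_txqsq stop reaching shared state through a rq->channel/sq->channel back-pointer and instead cache the few fields they actually use at alloc time, which is what lets the new PTP queues exist outside a regular channel. Schematically (forward declarations only; the field set is taken from the hunks):

	struct mlx5e_priv;
	struct mlx5e_icosq;
	struct net_device;
	struct mlx5e_channel;

	/* Before: everything went through the owning channel. */
	struct rq_before {
		struct mlx5e_channel *channel;	/* rq->channel->priv, ->icosq, ->ix ... */
	};

	/* After: the queue caches exactly what it needs. */
	struct rq_after {
		struct mlx5e_priv *priv;	/* was rq->channel->priv   */
		struct mlx5e_icosq *icosq;	/* was &rq->channel->icosq */
		struct net_device *netdev;	/* direct, no detour       */
		int ix;				/* was rq->channel->ix     */
	};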
rq->netdev = c->netdev; + rq->priv = c->priv; rq->tstamp = c->tstamp; rq->clock = &mdev->clock; - rq->channel = c; + rq->icosq = &c->icosq; rq->ix = c->ix; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); @@ -434,7 +436,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq_xdp_ix = rq->ix; if (xsk) rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK; - err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix); + err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0); if (err < 0) goto err_rq_xdp_prog; @@ -613,14 +615,11 @@ err_rq_xdp_prog: static void mlx5e_free_rq(struct mlx5e_rq *rq) { - struct mlx5e_channel *c = rq->channel; - struct bpf_prog *old_prog = NULL; + struct bpf_prog *old_prog; int i; - /* drop_rq has neither channel nor xdp_prog. */ - if (c) - old_prog = rcu_dereference_protected(rq->xdp_prog, - lockdep_is_held(&c->priv->state_lock)); + old_prog = rcu_dereference_protected(rq->xdp_prog, + lockdep_is_held(&rq->priv->state_lock)); if (old_prog) bpf_prog_put(old_prog); @@ -720,9 +719,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) { - struct mlx5e_channel *c = rq->channel; - struct mlx5e_priv *priv = c->priv; - struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; @@ -751,8 +748,7 @@ static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) { - struct mlx5e_channel *c = rq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = rq->mdev; void *in; void *rqc; int inlen; @@ -786,7 +782,6 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) { unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); - struct mlx5e_channel *c = rq->channel; u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); @@ -797,8 +792,8 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) msleep(20); } while (time_before(jiffies, exp_time)); - netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); + netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); mlx5e_reporter_rx_timeout(rq); return -ETIMEDOUT; @@ -913,7 +908,7 @@ err_free_rq: void mlx5e_activate_rq(struct mlx5e_rq *rq) { set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - mlx5e_trigger_irq(&rq->channel->icosq); + mlx5e_trigger_irq(rq->icosq); } void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -925,7 +920,7 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { cancel_work_sync(&rq->dim.work); - cancel_work_sync(&rq->channel->icosq.recover_work); + cancel_work_sync(&rq->icosq->recover_work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1089,14 +1084,14 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq) mlx5_wq_destroy(&sq->wq_ctrl); } -static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) { kvfree(sq->db.wqe_info); - kvfree(sq->db.skb_fifo); + kvfree(sq->db.skb_fifo.fifo); kvfree(sq->db.dma_fifo); } -static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) +int mlx5e_alloc_txqsq_db(struct 
mlx5e_txqsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; @@ -1104,46 +1099,26 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)), GFP_KERNEL, numa); - sq->db.skb_fifo = kvzalloc_node(array_size(df_sz, - sizeof(*sq->db.skb_fifo)), + sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.skb_fifo.fifo)), GFP_KERNEL, numa); sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)), GFP_KERNEL, numa); - if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) { + if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) { mlx5e_free_txqsq_db(sq); return -ENOMEM; } sq->dma_fifo_mask = df_sz - 1; - sq->skb_fifo_mask = df_sz - 1; - - return 0; -} - -static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size) -{ - int sq_size = 1 << log_sq_size; - sq->stop_room = mlx5e_tls_get_stop_room(sq); - sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) - /* A MPWQE can take up to the maximum-sized WQE + all the normal - * stop room can be taken if a new packet breaks the active - * MPWQE session and allocates its WQEs right away. - */ - sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); - - if (WARN_ON(sq->stop_room >= sq_size)) { - netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n", - sq->stop_room, sq_size); - return -ENOSPC; - } + sq->db.skb_fifo.pc = &sq->skb_fifo_pc; + sq->db.skb_fifo.cc = &sq->skb_fifo_cc; + sq->db.skb_fifo.mask = df_sz - 1; return 0; } -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -1160,7 +1135,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->tstamp = c->tstamp; sq->clock = &mdev->clock; sq->mkey_be = c->mkey_be; - sq->channel = c; + sq->netdev = c->netdev; + sq->mdev = c->mdev; + sq->priv = c->priv; sq->ch_ix = c->ix; sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; @@ -1176,9 +1153,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, set_bit(MLX5E_SQ_STATE_TLS, &sq->state); if (param->is_mpw) set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); - err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size); - if (err) - return err; + sq->stop_room = param->stop_room; param->wq.db_numa_node = cpu_to_node(c->cpu); err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); @@ -1201,20 +1176,12 @@ err_sq_wq_destroy: return err; } -static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) { mlx5e_free_txqsq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); } -struct mlx5e_create_sq_param { - struct mlx5_wq_ctrl *wq_ctrl; - u32 cqn; - u32 tisn; - u8 tis_lst_sz; - u8 min_inline_mode; -}; - static int mlx5e_create_sq(struct mlx5_core_dev *mdev, struct mlx5e_sq_param *param, struct mlx5e_create_sq_param *csp, @@ -1239,6 +1206,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz); MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); MLX5_SET(sqc, sqc, cqn, csp->cqn); + MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn); if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); @@ -1296,10 +1264,10 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, 
u32 sqn) mlx5_core_destroy_sq(mdev, sqn); } -static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, - struct mlx5e_sq_param *param, - struct mlx5e_create_sq_param *csp, - u32 *sqn) +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, + struct mlx5e_sq_param *param, + struct mlx5e_create_sq_param *csp, + u32 *sqn) { struct mlx5e_modify_sq_param msp = {0}; int err; @@ -1362,7 +1330,7 @@ err_free_txqsq: void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { - sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); + sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); @@ -1375,7 +1343,7 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq) __netif_tx_unlock_bh(txq); } -static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; @@ -1400,8 +1368,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) { - struct mlx5e_channel *c = sq->channel; - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = sq->mdev; struct mlx5_rate_limit rl = {0}; cancel_work_sync(&sq->dim.work); @@ -1415,7 +1382,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) mlx5e_free_txqsq(sq); } -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) +void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) { struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, recover_work); @@ -1542,10 +1509,11 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) mlx5e_free_xdpsq(sq); } -static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, +static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) { + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_cq *mcq = &cq->mcq; int eqn_not_used; unsigned int irqn; @@ -1578,25 +1546,27 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, } cq->mdev = mdev; + cq->netdev = priv->netdev; + cq->priv = priv; return 0; } -static int mlx5e_alloc_cq(struct mlx5e_channel *c, +static int mlx5e_alloc_cq(struct mlx5e_priv *priv, struct mlx5e_cq_param *param, + struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = c->priv->mdev; int err; - param->wq.buf_numa_node = cpu_to_node(c->cpu); - param->wq.db_numa_node = cpu_to_node(c->cpu); - param->eq_ix = c->ix; + param->wq.buf_numa_node = ccp->node; + param->wq.db_numa_node = ccp->node; + param->eq_ix = ccp->ix; - err = mlx5e_alloc_cq_common(mdev, param, cq); + err = mlx5e_alloc_cq_common(priv, param, cq); - cq->napi = &c->napi; - cq->channel = c; + cq->napi = ccp->napi; + cq->ch_stats = ccp->ch_stats; return err; } @@ -1660,13 +1630,14 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) mlx5_core_destroy_cq(cq->mdev, &cq->mcq); } -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder, - struct mlx5e_cq_param *param, struct mlx5e_cq *cq) +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, + struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, + struct mlx5e_cq *cq) { - struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_core_dev *mdev = priv->mdev; int err; - err = mlx5e_alloc_cq(c, param, cq); + err = mlx5e_alloc_cq(priv, param, ccp, cq); if (err) return err; @@ -1692,14 +1663,15 @@ void mlx5e_close_cq(struct mlx5e_cq *cq) static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct 
mlx5e_params *params, + struct mlx5e_create_cq_param *ccp, struct mlx5e_channel_param *cparam) { int err; int tc; for (tc = 0; tc < c->num_tc; tc++) { - err = mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->txq_sq.cqp, &c->sq[tc].cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp, + ccp, &c->sq[tc].cq); if (err) goto err_close_tx_cqs; } @@ -1834,35 +1806,52 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c) +{ + *ccp = (struct mlx5e_create_cq_param) { + .napi = &c->napi, + .ch_stats = c->stats, + .node = cpu_to_node(c->cpu), + .ix = c->ix, + }; +} + static int mlx5e_open_queues(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { struct dim_cq_moder icocq_moder = {0, 0}; + struct mlx5e_create_cq_param ccp; int err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq); + mlx5e_build_create_cq_param(&ccp, c); + + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, + &c->async_icosq.cq); if (err) return err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq); + err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, + &c->icosq.cq); if (err) goto err_close_async_icosq_cq; - err = mlx5e_open_tx_cqs(c, params, cparam); + err = mlx5e_open_tx_cqs(c, params, &ccp, cparam); if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq); + err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, + &c->xdpsq.cq); if (err) goto err_close_tx_cqs; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq); + err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, + &c->rq.cq); if (err) goto err_close_xdp_tx_cqs; - err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0; + err = c->xdp ? 
mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, + &ccp, &c->rq_xdpsq.cq) : 0; if (err) goto err_close_rx_cq; @@ -2225,6 +2214,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); + param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params); mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); } @@ -2384,6 +2374,13 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } + if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) { + err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port, + &chs->port_ptp); + if (err) + goto err_close_channels; + } + mlx5e_health_channels_update(priv); kvfree(cparam); return 0; @@ -2405,6 +2402,9 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs) for (i = 0; i < chs->num; i++) mlx5e_activate_channel(chs->c[i]); + + if (chs->port_ptp) + mlx5e_ptp_activate_channel(chs->port_ptp); } #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ @@ -2431,6 +2431,9 @@ static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) { int i; + if (chs->port_ptp) + mlx5e_ptp_deactivate_channel(chs->port_ptp); + for (i = 0; i < chs->num; i++) mlx5e_deactivate_channel(chs->c[i]); } @@ -2439,6 +2442,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; + if (chs->port_ptp) + mlx5e_port_ptp_close(chs->port_ptp); + for (i = 0; i < chs->num; i++) mlx5e_close_channel(chs->c[i]); @@ -2924,6 +2930,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) nch = priv->channels.params.num_channels; ntc = priv->channels.params.num_tc; num_txqs = nch * ntc; + if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) + num_txqs += ntc; num_rxqs = nch * priv->profile->rq_groups; mlx5e_netdev_set_tcs(netdev, nch, ntc); @@ -2997,14 +3005,13 @@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) { - int i, ch; + int i, ch, tc, num_tc; ch = priv->channels.num; + num_tc = priv->channels.params.num_tc; for (i = 0; i < ch; i++) { - int tc; - - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5e_txqsq *sq = &c->sq[tc]; @@ -3012,10 +3019,29 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) priv->channel_tc2realtxq[i][tc] = i + tc * ch; } } + + if (!priv->channels.port_ptp) + return; + + for (tc = 0; tc < num_tc; tc++) { + struct mlx5e_port_ptp *c = priv->channels.port_ptp; + struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; + + priv->txq2sq[sq->txq_ix] = sq; + priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc; + } +} + +static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) +{ + /* Sync with mlx5e_select_queue. 
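[Editorial note] The txq maps built above encode a simple layout: the first num_tc_x_num_ch device txqs belong to the regular channels, and the PTP SQs are appended after them, which is also why mlx5e_update_netdev_queues() reserves ntc extra txqs when the pflag is set. A worked example of the index math (hypothetical sizes):

	/* ch = 4 channels, num_tc = 2  ->  num_tc_x_num_ch = 8
	 *
	 * Regular SQ of channel i, tc t  ->  txq  i + t * ch:
	 *   (0,0)->0  (1,0)->1  (2,0)->2  (3,0)->3
	 *   (0,1)->4  (1,1)->5  (2,1)->6  (3,1)->7
	 *
	 * PTP SQ of tc t  ->  txq  num_tc_x_num_ch + t:
	 *   (ptp,0)->8  (ptp,1)->9
	 */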
*/ + WRITE_ONCE(priv->num_tc_x_num_ch, + priv->channels.params.num_tc * priv->channels.num); } void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { + mlx5e_update_num_tc_x_num_ch(priv); mlx5e_build_txq_maps(priv); mlx5e_activate_channels(&priv->channels); mlx5e_xdp_tx_enable(priv); @@ -3135,7 +3161,7 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, mlx5_set_port_admin_status(mdev, state); - if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) + if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY) return; if (state == MLX5_PORT_UP) @@ -3219,6 +3245,11 @@ int mlx5e_close(struct net_device *netdev) return err; } +static void mlx5e_free_drop_rq(struct mlx5e_rq *rq) +{ + mlx5_wq_destroy(&rq->wq_ctrl); +} + static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq, struct mlx5e_rq_param *param) @@ -3242,14 +3273,16 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, return 0; } -static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, +static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv, struct mlx5e_cq *cq, struct mlx5e_cq_param *param) { + struct mlx5_core_dev *mdev = priv->mdev; + param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev)); - return mlx5e_alloc_cq_common(mdev, param, cq); + return mlx5e_alloc_cq_common(priv, param, cq); } int mlx5e_open_drop_rq(struct mlx5e_priv *priv, @@ -3263,7 +3296,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, mlx5e_build_drop_rq_param(priv, &rq_param); - err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); + err = mlx5e_alloc_drop_cq(priv, cq, &cq_param); if (err) return err; @@ -3286,7 +3319,7 @@ int mlx5e_open_drop_rq(struct mlx5e_priv *priv, return 0; err_free_rq: - mlx5e_free_rq(drop_rq); + mlx5e_free_drop_rq(drop_rq); err_destroy_cq: mlx5e_destroy_cq(cq); @@ -3300,7 +3333,7 @@ err_free_cq: void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); - mlx5e_free_rq(drop_rq); + mlx5e_free_drop_rq(drop_rq); mlx5e_destroy_cq(&drop_rq->cq); mlx5e_free_cq(&drop_rq->cq); } @@ -3999,6 +4032,9 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; + err = mlx5e_validate_params(priv, &new_channels.params); + if (err) + goto out; if (params->xdp_prog && !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) { @@ -4251,6 +4287,20 @@ int mlx5e_get_vf_stats(struct net_device *dev, } #endif +static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx)); + default: + return false; + } +} + static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev, struct sk_buff *skb) { @@ -4293,7 +4343,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, break; case IPPROTO_IPIP: case IPPROTO_IPV6: - if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP)) + if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP)) return features; break; case IPPROTO_UDP: @@ -4342,6 +4392,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, tx_timeout_work); + struct net_device *netdev = priv->netdev; int i; rtnl_lock(); @@ 
-4350,9 +4401,9 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; - for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { struct netdev_queue *dev_queue = - netdev_get_tx_queue(priv->netdev, i); + netdev_get_tx_queue(netdev, i); struct mlx5e_txqsq *sq = priv->txq2sq[i]; if (!netif_xmit_stopped(dev_queue)) @@ -4412,7 +4463,7 @@ static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog struct bpf_prog *old_prog; old_prog = rcu_replace_pointer(rq->xdp_prog, prog, - lockdep_is_held(&rq->channel->priv->state_lock)); + lockdep_is_held(&rq->priv->state_lock)); if (old_prog) bpf_prog_put(old_prog); } @@ -4597,31 +4648,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_devlink_port = mlx5e_get_devlink_port, }; -static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) -{ - if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return -EOPNOTSUPP; - if (!MLX5_CAP_GEN(mdev, eth_net_offloads) || - !MLX5_CAP_GEN(mdev, nic_flow_table) || - !MLX5_CAP_ETH(mdev, csum_cap) || - !MLX5_CAP_ETH(mdev, max_lso_cap) || - !MLX5_CAP_ETH(mdev, vlan_cap) || - !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) || - MLX5_CAP_FLOWTABLE(mdev, - flow_table_properties_nic_receive.max_ft_level) - < 3) { - mlx5_core_warn(mdev, - "Not creating net device, some required device capabilities are missing\n"); - return -EOPNOTSUPP; - } - if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) - mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); - if (!MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_warn(mdev, "CQ moderation is not supported\n"); - - return 0; -} - void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels) { @@ -4877,6 +4903,17 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) priv->netdev->udp_tunnel_nic_info = &priv->nic_info; } +static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt))) + return true; + } + return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); +} + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4922,8 +4959,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_vxlan_set_netdev_info(priv); - if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || - mlx5e_any_tunnel_proto_supported(mdev)) { + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; @@ -4940,7 +4976,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; } - if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) { + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) { netdev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; netdev->hw_enc_features |= NETIF_F_GSO_GRE | @@ -4949,7 +4985,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM; } - if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) { + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) { netdev->hw_features |= NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6; netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 | @@ -5334,10 +5370,14 @@ struct net_device 
*mlx5e_create_netdev(struct mlx5_core_dev *mdev, void *ppriv) { struct net_device *netdev; + unsigned int ptp_txqs = 0; int err; + if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) + ptp_txqs = profile->max_tc; + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * profile->max_tc, + nch * profile->max_tc + ptp_txqs, nch * profile->rq_groups); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); @@ -5441,13 +5481,12 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv) free_netdev(netdev); } -/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying - * hardware contexts and to connect it to the current netdev. - */ -static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) +static int mlx5e_resume(struct auxiliary_device *adev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = edev->mdev; int err; if (netif_device_present(netdev)) @@ -5466,109 +5505,111 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv) return 0; } -static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) +static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state) { - struct mlx5e_priv *priv = vpriv; + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); struct net_device *netdev = priv->netdev; - -#ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) - return; -#endif + struct mlx5_core_dev *mdev = priv->mdev; if (!netif_device_present(netdev)) - return; + return -ENODEV; mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); + return 0; } -static void *mlx5e_add(struct mlx5_core_dev *mdev) +static int mlx5e_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; struct net_device *netdev; + pm_message_t state = {}; void *priv; int err; int nch; - err = mlx5e_check_required_hca_cap(mdev); - if (err) - return NULL; - -#ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev) && - mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) { - mlx5e_rep_register_vport_reps(mdev); - return mdev; - } -#endif - nch = mlx5e_get_max_num_channels(mdev); netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - return NULL; + return -ENOMEM; } dev_net_set(netdev, mlx5_core_net(mdev)); priv = netdev_priv(netdev); + dev_set_drvdata(&adev->dev, priv); - err = mlx5e_attach(mdev, priv); + err = mlx5e_resume(adev); if (err) { - mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err); + mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err); goto err_destroy_netdev; } err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_detach; + goto err_resume; } mlx5e_devlink_port_type_eth_set(priv); mlx5e_dcbnl_init_app(priv); - return priv; + return 0; -err_detach: - mlx5e_detach(mdev, priv); +err_resume: + mlx5e_suspend(adev, state); err_destroy_netdev: mlx5e_destroy_netdev(priv); - return NULL; + return err; } -static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) +static void mlx5e_remove(struct auxiliary_device *adev) { - struct mlx5e_priv *priv; + struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev); + pm_message_t state = {}; -#ifdef CONFIG_MLX5_ESWITCH - 
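[Editorial note] From this point the ethernet driver is converted from the old mlx5_interface add/remove/attach/detach callbacks to the auxiliary bus: probe, remove, suspend and resume hang off an auxiliary_driver whose id_table names the devices it binds to, and dev_set_drvdata()/dev_get_drvdata() replace the opaque vpriv cookie. A bare skeleton of that registration pattern, reduced from the surrounding hunks (the "vendor_mod.demo" id string and demo_* names are illustrative):

	#include <linux/auxiliary_bus.h>
	#include <linux/module.h>

	static int demo_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
	{
		/* allocate driver state, then dev_set_drvdata(&adev->dev, state) */
		return 0;
	}

	static void demo_remove(struct auxiliary_device *adev)
	{
		/* state = dev_get_drvdata(&adev->dev); tear it down */
	}

	static const struct auxiliary_device_id demo_id_table[] = {
		{ .name = "vendor_mod.demo" },	/* <parent module name>.<device name> */
		{}
	};
	MODULE_DEVICE_TABLE(auxiliary, demo_id_table);

	static struct auxiliary_driver demo_driver = {
		.name = "demo",
		.probe = demo_probe,
		.remove = demo_remove,
		.id_table = demo_id_table,
	};
	module_auxiliary_driver(demo_driver);
	MODULE_LICENSE("GPL");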
if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) { - mlx5e_rep_unregister_vport_reps(mdev); - return; - } -#endif - priv = vpriv; mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - mlx5e_detach(mdev, vpriv); + mlx5e_suspend(adev, state); mlx5e_destroy_netdev(priv); } -static struct mlx5_interface mlx5e_interface = { - .add = mlx5e_add, - .remove = mlx5e_remove, - .attach = mlx5e_attach, - .detach = mlx5e_detach, - .protocol = MLX5_INTERFACE_PROTOCOL_ETH, +static const struct auxiliary_device_id mlx5e_id_table[] = { + { .name = MLX5_ADEV_NAME ".eth", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table); + +static struct auxiliary_driver mlx5e_driver = { + .name = "eth", + .probe = mlx5e_probe, + .remove = mlx5e_remove, + .suspend = mlx5e_suspend, + .resume = mlx5e_resume, + .id_table = mlx5e_id_table, }; -void mlx5e_init(void) +int mlx5e_init(void) { + int ret; + mlx5e_ipsec_build_inverse_table(); mlx5e_build_ptys2ethtool_map(); - mlx5_register_interface(&mlx5e_interface); + ret = mlx5e_rep_init(); + if (ret) + return ret; + + ret = auxiliary_driver_register(&mlx5e_driver); + if (ret) + mlx5e_rep_cleanup(); + return ret; } void mlx5e_cleanup(void) { - mlx5_unregister_interface(&mlx5e_interface); + auxiliary_driver_unregister(&mlx5e_driver); + mlx5e_rep_cleanup(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 67247c33b9fd..989c70c1eda3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -64,7 +64,6 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->driver, mlx5e_rep_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), @@ -1316,16 +1315,48 @@ static const struct mlx5_eswitch_rep_ops rep_ops = { .get_proto_dev = mlx5e_vport_rep_get_proto_dev }; -void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) +static int mlx5e_rep_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = edev->mdev; + struct mlx5_eswitch *esw; + esw = mdev->priv.eswitch; mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH); + return 0; } -void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) +static void mlx5e_rep_remove(struct auxiliary_device *adev) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_adev *vdev = container_of(adev, struct mlx5_adev, adev); + struct mlx5_core_dev *mdev = vdev->mdev; + struct mlx5_eswitch *esw; + esw = mdev->priv.eswitch; mlx5_eswitch_unregister_vport_reps(esw, REP_ETH); } + +static const struct auxiliary_device_id mlx5e_rep_id_table[] = { + { .name = MLX5_ADEV_NAME ".eth-rep", }, + {}, +}; + +MODULE_DEVICE_TABLE(auxiliary, mlx5e_rep_id_table); + +static struct auxiliary_driver mlx5e_rep_driver = { + .name = "eth-rep", + .probe = mlx5e_rep_probe, + .remove = mlx5e_rep_remove, + .id_table = mlx5e_rep_id_table, +}; + +int mlx5e_rep_init(void) +{ + return auxiliary_driver_register(&mlx5e_rep_driver); +} + +void mlx5e_rep_cleanup(void) +{ + auxiliary_driver_unregister(&mlx5e_rep_driver); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 
8932c387d46a..988195ab1c54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -203,8 +203,8 @@ struct mlx5e_rep_sq { struct list_head list; }; -void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev); -void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev); +int mlx5e_rep_init(void); +void mlx5e_rep_cleanup(void); int mlx5e_rep_bond_init(struct mlx5e_rep_priv *rpriv); void mlx5e_rep_bond_cleanup(struct mlx5e_rep_priv *rpriv); int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev, @@ -232,6 +232,8 @@ static inline bool mlx5e_eswitch_rep(struct net_device *netdev) static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} +static inline int mlx5e_rep_init(void) { return 0; }; +static inline void mlx5e_rep_cleanup(void) {}; #endif static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 6628a0197b4e..7f5851c61218 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,7 +52,6 @@ #include "en/xsk/rx.h" #include "en/health.h" #include "en/params.h" -#include "en/txrx.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -503,7 +502,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[0]; - struct mlx5e_icosq *sq = &rq->channel->icosq; + struct mlx5e_icosq *sq = rq->icosq; struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); @@ -670,13 +669,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) sqcc += wi->num_wqebbs; if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { - netdev_WARN_ONCE(cq->channel->netdev, + netdev_WARN_ONCE(cq->netdev, "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) - queue_work(cq->channel->priv->wq, &sq->recover_work); + queue_work(cq->priv->wq, &sq->recover_work); break; } @@ -697,7 +696,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; #endif default: - netdev_WARN_ONCE(cq->channel->netdev, + netdev_WARN_ONCE(cq->netdev, "Bad WQE type in ICOSQ WQE info: 0x%x\n", wi->wqe_type); } @@ -713,9 +712,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { - struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq; u8 umr_completed = rq->mpwqe.umr_completed; + struct mlx5e_icosq *sq = rq->icosq; int alloc_err = 0; u8 missing, i; u16 head; @@ -1218,11 +1217,12 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; + struct mlx5e_priv *priv = rq->priv; if (cqe_syndrome_needs_recover(err_cqe->syndrome) && !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); - queue_work(rq->channel->priv->wq, &rq->recover_work); + queue_work(priv->wq, 
&rq->recover_work); } } @@ -1771,8 +1771,9 @@ wq_free_wqe: int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) { + struct net_device *netdev = rq->netdev; struct mlx5_core_dev *mdev = rq->mdev; - struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = rq->priv; switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -1784,15 +1785,15 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; #ifdef CONFIG_MLX5_EN_IPSEC if (MLX5_IPSEC_DEV(mdev)) { - netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); + netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n"); return -EINVAL; } #endif if (!rq->handle_rx_cqe) { - netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n"); + netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); return -EINVAL; } break; @@ -1807,13 +1808,13 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool #ifdef CONFIG_MLX5_EN_IPSEC if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && - c->priv->ipsec) + priv->ipsec) rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; else #endif - rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe; + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe; if (!rq->handle_rx_cqe) { - netdev_err(c->netdev, "RX handler of RQ is not set\n"); + netdev_err(netdev, "RX handler of RQ is not set\n"); return -EINVAL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 78f6a6f0a7e0..2cf2042b37c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -248,6 +248,178 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw) return idx; } +static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xdpsq_red_stats) +{ + s->tx_xdp_xmit += xdpsq_red_stats->xmit; + s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; + s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; + s->tx_xdp_nops += xdpsq_red_stats->nops; + s->tx_xdp_full += xdpsq_red_stats->full; + s->tx_xdp_err += xdpsq_red_stats->err; + s->tx_xdp_cqes += xdpsq_red_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xdpsq_stats) +{ + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; + s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; + s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; + s->rx_xdp_tx_nops += xdpsq_stats->nops; + s->rx_xdp_tx_full += xdpsq_stats->full; + s->rx_xdp_tx_err += xdpsq_stats->err; + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s, + struct mlx5e_xdpsq_stats *xsksq_stats) +{ + s->tx_xsk_xmit += xsksq_stats->xmit; + s->tx_xsk_mpwqe += xsksq_stats->mpwqe; + s->tx_xsk_inlnw += xsksq_stats->inlnw; + s->tx_xsk_full += xsksq_stats->full; + s->tx_xsk_err += xsksq_stats->err; + s->tx_xsk_cqes += xsksq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s, + struct mlx5e_rq_stats *xskrq_stats) +{ + s->rx_xsk_packets += xskrq_stats->packets; + s->rx_xsk_bytes += xskrq_stats->bytes; + s->rx_xsk_csum_complete += xskrq_stats->csum_complete; + s->rx_xsk_csum_unnecessary 
+= xskrq_stats->csum_unnecessary; + s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; + s->rx_xsk_csum_none += xskrq_stats->csum_none; + s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; + s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; + s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; + s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; + s->rx_xsk_wqe_err += xskrq_stats->wqe_err; + s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; + s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; + s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; + s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; + s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; + s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; + s->rx_xsk_congst_umr += xskrq_stats->congst_umr; + s->rx_xsk_arfs_err += xskrq_stats->arfs_err; +} + +static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, + struct mlx5e_rq_stats *rq_stats) +{ + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_lro_packets += rq_stats->lro_packets; + s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_ecn_mark += rq_stats->ecn_mark; + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_complete_tail += rq_stats->csum_complete_tail; + s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; + s->rx_xdp_drop += rq_stats->xdp_drop; + s->rx_xdp_redirect += rq_stats->xdp_redirect; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; + s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_congst_umr += rq_stats->congst_umr; + s->rx_arfs_err += rq_stats->arfs_err; + s->rx_recover += rq_stats->recover; +#ifdef CONFIG_MLX5_EN_TLS + s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; + s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; + s->rx_tls_ctx += rq_stats->tls_ctx; + s->rx_tls_del += rq_stats->tls_del; + s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; + s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; + s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; + s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; + s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; + s->rx_tls_err += rq_stats->tls_err; +#endif +} + +static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s, + struct mlx5e_ch_stats *ch_stats) +{ + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_arm += ch_stats->arm; + s->ch_aff_change += ch_stats->aff_change; + s->ch_force_irq += ch_stats->force_irq; + s->ch_eq_rearm += ch_stats->eq_rearm; +} + +static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s, + 
struct mlx5e_sq_stats *sq_stats) +{ + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_added_vlan_packets += sq_stats->added_vlan_packets; + s->tx_nop += sq_stats->nop; + s->tx_mpwqe_blks += sq_stats->mpwqe_blks; + s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_wake += sq_stats->wake; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_recover += sq_stats->recover; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_csum_partial += sq_stats->csum_partial; +#ifdef CONFIG_MLX5_EN_TLS + s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; + s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; + s->tx_tls_ctx += sq_stats->tls_ctx; + s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; + s->tx_tls_dump_packets += sq_stats->tls_dump_packets; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; + s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; + s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; + s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; +#endif + s->tx_cqes += sq_stats->cqes; +} + +static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv, + struct mlx5e_sw_stats *s) +{ + int i; + + if (!priv->port_ptp_opened) + return; + + mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->port_ptp_stats.ch); + + for (i = 0; i < priv->max_opened_tc; i++) { + mlx5e_stats_grp_sw_update_stats_sq(s, &priv->port_ptp_stats.sq[i]); + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); + } +} + static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { struct mlx5e_sw_stats *s = &priv->stats.sw; @@ -258,144 +430,25 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) for (i = 0; i < priv->max_nch; i++) { struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; - struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; - struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; - struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq; - struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; - struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; - struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; int j; - s->rx_packets += rq_stats->packets; - s->rx_bytes += rq_stats->bytes; - s->rx_lro_packets += rq_stats->lro_packets; - s->rx_lro_bytes += rq_stats->lro_bytes; - s->rx_ecn_mark += rq_stats->ecn_mark; - s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; - s->rx_csum_none += rq_stats->csum_none; - s->rx_csum_complete += rq_stats->csum_complete; - s->rx_csum_complete_tail += rq_stats->csum_complete_tail; - s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow; - s->rx_csum_unnecessary += rq_stats->csum_unnecessary; - s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; - s->rx_xdp_drop += rq_stats->xdp_drop; - s->rx_xdp_redirect += rq_stats->xdp_redirect; - s->rx_xdp_tx_xmit += xdpsq_stats->xmit; - s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe; - s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw; - s->rx_xdp_tx_nops += xdpsq_stats->nops; - s->rx_xdp_tx_full += xdpsq_stats->full; - s->rx_xdp_tx_err += 
xdpsq_stats->err; - s->rx_xdp_tx_cqe += xdpsq_stats->cqes; - s->rx_wqe_err += rq_stats->wqe_err; - s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; - s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; - s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; - s->rx_buff_alloc_err += rq_stats->buff_alloc_err; - s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; - s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; - s->rx_cache_reuse += rq_stats->cache_reuse; - s->rx_cache_full += rq_stats->cache_full; - s->rx_cache_empty += rq_stats->cache_empty; - s->rx_cache_busy += rq_stats->cache_busy; - s->rx_cache_waive += rq_stats->cache_waive; - s->rx_congst_umr += rq_stats->congst_umr; - s->rx_arfs_err += rq_stats->arfs_err; - s->rx_recover += rq_stats->recover; -#ifdef CONFIG_MLX5_EN_TLS - s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; - s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; - s->rx_tls_ctx += rq_stats->tls_ctx; - s->rx_tls_del += rq_stats->tls_del; - s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; - s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; - s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; - s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; - s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; - s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; - s->rx_tls_err += rq_stats->tls_err; -#endif - s->ch_events += ch_stats->events; - s->ch_poll += ch_stats->poll; - s->ch_arm += ch_stats->arm; - s->ch_aff_change += ch_stats->aff_change; - s->ch_force_irq += ch_stats->force_irq; - s->ch_eq_rearm += ch_stats->eq_rearm; + mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq); + mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq); + mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch); /* xdp redirect */ - s->tx_xdp_xmit += xdpsq_red_stats->xmit; - s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe; - s->tx_xdp_inlnw += xdpsq_red_stats->inlnw; - s->tx_xdp_nops += xdpsq_red_stats->nops; - s->tx_xdp_full += xdpsq_red_stats->full; - s->tx_xdp_err += xdpsq_red_stats->err; - s->tx_xdp_cqes += xdpsq_red_stats->cqes; + mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq); /* AF_XDP zero-copy */ - s->rx_xsk_packets += xskrq_stats->packets; - s->rx_xsk_bytes += xskrq_stats->bytes; - s->rx_xsk_csum_complete += xskrq_stats->csum_complete; - s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary; - s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner; - s->rx_xsk_csum_none += xskrq_stats->csum_none; - s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; - s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; - s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; - s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; - s->rx_xsk_wqe_err += xskrq_stats->wqe_err; - s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes; - s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides; - s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop; - s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err; - s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks; - s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts; - s->rx_xsk_congst_umr += xskrq_stats->congst_umr; - s->rx_xsk_arfs_err += xskrq_stats->arfs_err; - s->tx_xsk_xmit += xsksq_stats->xmit; - s->tx_xsk_mpwqe += xsksq_stats->mpwqe; - s->tx_xsk_inlnw += xsksq_stats->inlnw; - s->tx_xsk_full += xsksq_stats->full; - s->tx_xsk_err 
+= xsksq_stats->err; - s->tx_xsk_cqes += xsksq_stats->cqes; + mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq); + mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq); for (j = 0; j < priv->max_opened_tc; j++) { - struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; - - s->tx_packets += sq_stats->packets; - s->tx_bytes += sq_stats->bytes; - s->tx_tso_packets += sq_stats->tso_packets; - s->tx_tso_bytes += sq_stats->tso_bytes; - s->tx_tso_inner_packets += sq_stats->tso_inner_packets; - s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; - s->tx_added_vlan_packets += sq_stats->added_vlan_packets; - s->tx_nop += sq_stats->nop; - s->tx_mpwqe_blks += sq_stats->mpwqe_blks; - s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts; - s->tx_queue_stopped += sq_stats->stopped; - s->tx_queue_wake += sq_stats->wake; - s->tx_queue_dropped += sq_stats->dropped; - s->tx_cqe_err += sq_stats->cqe_err; - s->tx_recover += sq_stats->recover; - s->tx_xmit_more += sq_stats->xmit_more; - s->tx_csum_partial_inner += sq_stats->csum_partial_inner; - s->tx_csum_none += sq_stats->csum_none; - s->tx_csum_partial += sq_stats->csum_partial; -#ifdef CONFIG_MLX5_EN_TLS - s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets; - s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes; - s->tx_tls_ctx += sq_stats->tls_ctx; - s->tx_tls_ooo += sq_stats->tls_ooo; - s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes; - s->tx_tls_dump_packets += sq_stats->tls_dump_packets; - s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; - s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data; - s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data; - s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req; -#endif - s->tx_cqes += sq_stats->cqes; + mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]); /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ barrier(); } } + mlx5e_stats_grp_sw_update_stats_ptp(priv, s); } static const struct counter_desc q_stats_desc[] = { @@ -1656,6 +1709,37 @@ static const struct counter_desc ch_stats_desc[] = { { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, }; +static const struct counter_desc ptp_sq_stats_desc[] = { + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, +}; + +static const struct counter_desc ptp_ch_stats_desc[] = { + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) }, + { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) }, +}; + +static const struct counter_desc ptp_cq_stats_desc[] = { + { MLX5E_DECLARE_PTP_CQ_STAT(struct 
mlx5e_ptp_cq_stats, cqe) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, +}; + #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) @@ -1663,6 +1747,69 @@ static const struct counter_desc ch_stats_desc[] = { #define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc) #define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc) #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) +#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc) +#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc) +#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc) + +static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp) +{ + return priv->port_ptp_opened ? + NUM_PTP_CH_STATS + + ((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) : + 0; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp) +{ + int i, tc; + + if (!priv->port_ptp_opened) + return idx; + + for (i = 0; i < NUM_PTP_CH_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_ch_stats_desc[i].format); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_sq_stats_desc[i].format, tc); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ptp_cq_stats_desc[i].format, tc); + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp) +{ + int i, tc; + + if (!priv->port_ptp_opened) + return idx; + + for (i = 0; i < NUM_PTP_CH_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.ch, + ptp_ch_stats_desc, i); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_SQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.sq[tc], + ptp_sq_stats_desc, i); + + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < NUM_PTP_CQ_STATS; i++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->port_ptp_stats.cq[tc], + ptp_cq_stats_desc, i); + + return idx; +} + +static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; } static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) { @@ -1784,6 +1931,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0); MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0); MLX5E_DEFINE_STATS_GRP(eth_ext, 0); static MLX5E_DEFINE_STATS_GRP(tls, 0); +static MLX5E_DEFINE_STATS_GRP(ptp, 0); /* The stats groups order is opposite to the update_stats() order calls */ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { @@ -1806,6 +1954,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = { &MLX5E_STATS_GRP(tls), &MLX5E_STATS_GRP(channels), &MLX5E_STATS_GRP(per_port_buff_congest), + &MLX5E_STATS_GRP(ptp), }; unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 162daaadb0d8..e41fc11f2ce7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -51,6 +51,10 @@ #define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld) #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld) +#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld) +#define 
MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld) + struct counter_desc { char format[ETH_GSTRING_LEN]; size_t offset; /* Byte offset */ @@ -398,6 +402,13 @@ struct mlx5e_ch_stats { u64 eq_rearm; }; +struct mlx5e_ptp_cq_stats { + u64 cqe; + u64 err_cqe; + u64 abort; + u64 abort_abs_diff_ns; +}; + struct mlx5e_stats { struct mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce710f22b1ff..4cdf834fa74a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -271,8 +271,6 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, return 0; } -#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS) - static struct mlx5_tc_ct_priv * get_ct_priv(struct mlx5e_priv *priv) { @@ -280,7 +278,7 @@ get_ct_priv(struct mlx5e_priv *priv) struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; - if (esw_offloads_mode(esw)) { + if (is_mdev_switchdev_mode(priv->mdev)) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; @@ -297,7 +295,7 @@ mlx5_tc_rule_insert(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw_offloads_mode(esw)) + if (is_mdev_switchdev_mode(priv->mdev)) return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); return mlx5e_add_offloaded_nic_rule(priv, spec, attr); @@ -310,7 +308,7 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv, { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - if (esw_offloads_mode(esw)) { + if (is_mdev_switchdev_mode(priv->mdev)) { mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index d97203cf6a00..e47e2a0059d0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -32,6 +32,7 @@ #include <linux/tcp.h> #include <linux/if_vlan.h> +#include <linux/ptp_classify.h> #include <net/geneve.h> #include <net/dsfield.h> #include "en.h" @@ -39,6 +40,7 @@ #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" #include "lib/clock.h" +#include "en/ptp.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) { @@ -66,14 +68,73 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb } #endif +static bool mlx5e_use_ptpsq(struct sk_buff *skb) +{ + struct flow_keys fk; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return false; + + if (fk.basic.n_proto == htons(ETH_P_1588)) + return true; + + if (fk.basic.n_proto != htons(ETH_P_IP) && + fk.basic.n_proto != htons(ETH_P_IPV6)) + return false; + + return (fk.basic.ip_proto == IPPROTO_UDP && + fk.ports.dst == htons(PTP_EV_PORT)); +} + +static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int up = 0; + + if (!netdev_get_num_tc(dev)) + goto return_txq; + +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) + up = mlx5e_get_dscp_up(priv, skb); + else +#endif + if (skb_vlan_tag_present(skb)) + up = skb_vlan_tag_get_prio(skb); + +return_txq: + return priv->port_ptp_tc2realtxq[up]; +} + u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { - int txq_ix = netdev_pick_tx(dev, skb, NULL); struct mlx5e_priv *priv = netdev_priv(dev); + 
int txq_ix; int up = 0; int ch_ix; + if (unlikely(priv->channels.port_ptp)) { + int num_tc_x_num_ch; + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb); + + /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ + num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch); + + txq_ix = netdev_pick_tx(dev, skb, NULL); + /* Fix netdev_pick_tx() not to choose ptp_channel txqs. + * If they are selected, switch to regular queues. + * Driver to select these queues only at mlx5e_select_ptpsq(). + */ + if (unlikely(txq_ix >= num_tc_x_num_ch)) + txq_ix %= num_tc_x_num_ch; + } else { + txq_ix = netdev_pick_tx(dev, skb, NULL); + } + if (!netdev_get_num_tc(dev)) return txq_ix; @@ -402,6 +463,12 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_tx_check_stop(sq); + if (unlikely(sq->ptpsq)) { + mlx5e_skb_cb_hwtstamp_init(skb); + mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb); + skb_get(skb); + } + send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more); if (send_doorbell) mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); @@ -579,7 +646,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, goto err_unmap; mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); - mlx5e_skb_fifo_push(sq, skb); + mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); mlx5e_tx_mpwqe_add_dseg(sq, &txd); @@ -707,7 +774,11 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb, u64 ts = get_cqe_ts(cqe); hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts); - skb_tstamp_tx(skb, &hwts); + if (sq->ptpsq) + mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP, + hwts.hwtstamp, sq->ptpsq->cq_stats); + else + skb_tstamp_tx(skb, &hwts); } napi_consume_skb(skb, napi_budget); @@ -719,7 +790,7 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t int i; for (i = 0; i < wi->num_fifo_pkts; i++) { - struct sk_buff *skb = mlx5e_skb_fifo_pop(sq); + struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo); mlx5e_consume_skb(sq, skb, cqe, napi_budget); } @@ -805,8 +876,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); - queue_work(cq->channel->priv->wq, - &sq->recover_work); + queue_work(cq->priv->wq, &sq->recover_work); } stats->cqe_err++; } @@ -840,7 +910,7 @@ static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_ int i; for (i = 0; i < wi->num_fifo_pkts; i++) - dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq)); + dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo)); } void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index d5868670f8a5..1ec3d62f026d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -221,14 +221,13 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) napi_schedule(cq->napi); cq->event_ctr++; - cq->channel->stats->events++; + cq->ch_stats->events++; } void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); - struct mlx5e_channel *c = cq->channel; - struct net_device *netdev = c->netdev; + struct net_device *netdev = cq->netdev; netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n", 
__func__, mcq->cqn, event); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 8ebfe782f95e..fc0afa03d407 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, eqe = next_eqe_sw(eq); if (!eqe) - goto out; + return 0; do { struct mlx5_core_cq *cq; @@ -161,8 +161,6 @@ static int mlx5_eq_comp_int(struct notifier_block *nb, ++eq->cons_index; } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); - -out: eq_update_ci(eq, 1); if (cqn != -1) @@ -189,19 +187,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) return count_eqe; } -static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags) +static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery, + unsigned long *flags) __acquires(&eq->lock) { - if (in_irq()) + if (!recovery) spin_lock(&eq->lock); else spin_lock_irqsave(&eq->lock, *flags); } -static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags) +static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery, + unsigned long *flags) __releases(&eq->lock) { - if (in_irq()) + if (!recovery) spin_unlock(&eq->lock); else spin_unlock_irqrestore(&eq->lock, *flags); @@ -223,11 +223,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb, struct mlx5_eqe *eqe; unsigned long flags; int num_eqes = 0; + bool recovery; dev = eq->dev; eqt = dev->priv.eq_table; - mlx5_eq_async_int_lock(eq_async, &flags); + recovery = action == ASYNC_EQ_RECOVER; + mlx5_eq_async_int_lock(eq_async, recovery, &flags); eqe = next_eqe_sw(eq); if (!eqe) @@ -246,12 +248,12 @@ static int mlx5_eq_async_int(struct notifier_block *nb, ++eq->cons_index; } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq))); + eq_update_ci(eq, 1); out: - eq_update_ci(eq, 1); - mlx5_eq_async_int_unlock(eq_async, &flags); + mlx5_eq_async_int_unlock(eq_async, recovery, &flags); - return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0; + return unlikely(recovery) ? 
num_eqes : 0; } void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c index d46f8b225ebe..2b85d4777303 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c @@ -101,7 +101,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw, vport->egress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); - if (IS_ERR_OR_NULL(vport->egress.acl)) { + if (IS_ERR(vport->egress.acl)) { err = PTR_ERR(vport->egress.acl); vport->egress.acl = NULL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index c3faae67e4d6..4c74e2690d57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -173,7 +173,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport table_size++; vport->egress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size); - if (IS_ERR_OR_NULL(vport->egress.acl)) { + if (IS_ERR(vport->egress.acl)) { err = PTR_ERR(vport->egress.acl); vport->egress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c index 22f4c1c28006..4a369669e51e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c @@ -8,6 +8,7 @@ struct mlx5_flow_table * esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) { + struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_table *acl; @@ -33,7 +34,9 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size) return ERR_PTR(-EOPNOTSUPP); } - acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num); + ft_attr.max_fte = size; + ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT; + acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num); if (IS_ERR(acl)) { err = PTR_ERR(acl); esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index b68976b378b8..d64fad2823e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -180,7 +180,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, vport->ingress.acl = esw_acl_table_create(esw, vport->vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, table_size); - if (IS_ERR_OR_NULL(vport->ingress.acl)) { + if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c index 4e55d7225a26..548c005ea633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c @@ -258,7 +258,7 @@ int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, vport->ingress.acl = esw_acl_table_create(esw, vport->vport, 
MLX5_FLOW_NAMESPACE_ESW_INGRESS, num_ftes); - if (IS_ERR_OR_NULL(vport->ingress.acl)) { + if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d4ee0a9c03db..da901e364656 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1474,6 +1474,26 @@ vf_err: return err; } +static int host_pf_enable_hca(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_is_ecpf(dev)) + return 0; + + /* Once vport and representor are ready, take the external host PF + * out of initializing state. Enabling HCA clears the iseg->initializing + * bit and host PF driver loading can progress. + */ + return mlx5_cmd_host_pf_enable_hca(dev); +} + +static void host_pf_disable_hca(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_is_ecpf(dev)) + return; + + mlx5_cmd_host_pf_disable_hca(dev); +} + /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs * whichever are present on the eswitch. */ @@ -1488,6 +1508,11 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, if (ret) return ret; + /* Enable external host PF HCA */ + ret = host_pf_enable_hca(esw->dev); + if (ret) + goto pf_hca_err; + /* Enable ECPF vport */ if (mlx5_ecpf_vport_exists(esw->dev)) { ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events); @@ -1505,8 +1530,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, vf_err: if (mlx5_ecpf_vport_exists(esw->dev)) mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); - ecpf_err: + host_pf_disable_hca(esw->dev); +pf_hca_err: mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); return ret; } @@ -1521,6 +1547,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw) if (mlx5_ecpf_vport_exists(esw->dev)) mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF); + host_pf_disable_hca(esw->dev); mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF); } @@ -1614,8 +1641,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) if (mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_rescan_drivers(esw->dev); err = esw_offloads_enable(esw); } @@ -1633,10 +1659,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) abort: esw->mode = MLX5_ESWITCH_NONE; - if (mode == MLX5_ESWITCH_OFFLOADS) { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - } + if (mode == MLX5_ESWITCH_OFFLOADS) + mlx5_rescan_drivers(esw->dev); + esw_destroy_tsar(esw); return err; } @@ -1697,10 +1722,9 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) mlx5_lag_update(esw->dev); - if (old_mode == MLX5_ESWITCH_OFFLOADS) { - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); - } + if (old_mode == MLX5_ESWITCH_OFFLOADS) + mlx5_rescan_drivers(esw->dev); + esw_destroy_tsar(esw); if (clear_vf) @@ -2439,8 +2463,10 @@ free_out: return err; } -u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) +u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev) { + struct mlx5_eswitch *esw = dev->priv.eswitch; + return ESW_ALLOWED(esw) ?
esw->mode : MLX5_ESWITCH_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c9c2962ad49f..2f6a0ae20650 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -484,7 +484,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, } } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[i].ft = fwd_fdb, + dest[i].ft = fwd_fdb; i++; mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); @@ -1680,7 +1680,6 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw) goto out_free; } - memset(flow_group_in, 0, inlen); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); misc = MLX5_ADDR_OF(fte_match_param, match_criteria, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index 656f96be6e20..89ef592656c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -47,11 +47,12 @@ /** * enum mlx5_fpga_access_type - Enumerated the different methods possible for * accessing the device memory address space + * + * @MLX5_FPGA_ACCESS_TYPE_I2C: Use the slow CX-FPGA I2C bus + * @MLX5_FPGA_ACCESS_TYPE_DONTCARE: Use the fastest available method */ enum mlx5_fpga_access_type { - /** Use the slow CX-FPGA I2C bus */ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, - /** Use the fastest available method */ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0, }; @@ -113,6 +114,7 @@ struct mlx5_fpga_conn_attr { * subsequent receives. */ void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf); + /** @cb_arg: A context to be passed to recv_cb callback */ void *cb_arg; }; @@ -145,7 +147,7 @@ void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn); /** * mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet - * @fdev: An FPGA SBU connection + * @conn: An FPGA SBU connection * @buf: The packet buffer * * Queues a packet for transmission over an FPGA SBU connection. 
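To make the corrected kernel-doc concrete, the sketch below shows a minimal SBU connection user. Only the mlx5_fpga_sbu_conn_* calls and the mlx5_fpga_conn_attr fields come from fpga/sdk.h as documented above; every demo_* name and the queue sizes are hypothetical, and this is not part of the patch.

/* Illustrative sketch only - not driver code from this series. */
#include "fpga/sdk.h"

struct demo_ctx {
	struct mlx5_fpga_conn *conn;
};

/* recv_cb fires once per received packet; cb_arg is handed back verbatim */
static void demo_recv_cb(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct demo_ctx *ctx = cb_arg;

	/* consume the payload described by buf before returning; ctx is
	 * the same pointer passed as attr.cb_arg below
	 */
}

static int demo_conn_open(struct mlx5_fpga_device *fdev, struct demo_ctx *ctx)
{
	struct mlx5_fpga_conn_attr attr = {
		.tx_size = 64,		/* arbitrary queue sizes for the sketch */
		.rx_size = 64,
		.recv_cb = demo_recv_cb,
		.cb_arg = ctx,
	};

	ctx->conn = mlx5_fpga_sbu_conn_create(fdev, &attr);
	if (IS_ERR(ctx->conn)) {
		int err = PTR_ERR(ctx->conn);

		ctx->conn = NULL;
		return err;
	}
	return 0;
}

static int demo_conn_send(struct demo_ctx *ctx, struct mlx5_fpga_dma_buf *buf)
{
	/* first argument is @conn, not @fdev, per the kernel-doc fix above */
	return mlx5_fpga_sbu_conn_sendmsg(ctx->conn, buf);
}

static void demo_conn_close(struct demo_ctx *ctx)
{
	mlx5_fpga_sbu_conn_destroy(ctx->conn);
}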
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index babe3405132a..8e06731d3cb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -172,10 +172,9 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); - if (ft->vport) { - MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); - MLX5_SET(set_flow_table_root_in, in, other_vport, 1); - } + MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); + MLX5_SET(set_flow_table_root_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); return mlx5_cmd_exec_in(dev, set_flow_table_root, in); } @@ -199,10 +198,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_SET(create_flow_table_in, in, table_type, ft->type); MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level); MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size); - if (ft->vport) { - MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); - MLX5_SET(create_flow_table_in, in, other_vport, 1); - } + MLX5_SET(create_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(create_flow_table_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en, en_decap); @@ -252,10 +250,9 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns, MLX5_CMD_OP_DESTROY_FLOW_TABLE); MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); - if (ft->vport) { - MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); - MLX5_SET(destroy_flow_table_in, in, other_vport, 1); - } + MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_table_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); return mlx5_cmd_exec_in(dev, destroy_flow_table, in); } @@ -283,11 +280,9 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns, flow_table_context.lag_master_next_table_id, 0); } } else { - if (ft->vport) { - MLX5_SET(modify_flow_table_in, in, vport_number, - ft->vport); - MLX5_SET(modify_flow_table_in, in, other_vport, 1); - } + MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); MLX5_SET(modify_flow_table_in, in, modify_field_select, MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); if (next_ft) { @@ -325,10 +320,9 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns, - if (ft->vport) { - MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); - MLX5_SET(create_flow_group_in, in, other_vport, 1); - } + MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(create_flow_group_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out); if (!err) fg->id = MLX5_GET(create_flow_group_out, out, @@ -344,11 +342,9 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns, MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); MLX5_SET(destroy_flow_group_in, in, group_id, fg->id); - if (ft->vport) { - MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); - MLX5_SET(destroy_flow_group_in, in, other_vport, 1); - } - +
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_group_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); return mlx5_cmd_exec_in(dev, destroy_flow_group, in); } @@ -427,10 +423,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, ignore_flow_level, !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL)); - if (ft->vport) { - MLX5_SET(set_fte_in, in, vport_number, ft->vport); - MLX5_SET(set_fte_in, in, other_vport, 1); - } + MLX5_SET(set_fte_in, in, vport_number, ft->vport); + MLX5_SET(set_fte_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); @@ -515,6 +510,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, dst->dest_attr.vport.pkt_reformat->id); } break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + id = dst->dest_attr.sampler_id; + break; default: id = dst->dest_attr.tir_num; } @@ -601,10 +599,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns, MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_id, ft->id); MLX5_SET(delete_fte_in, in, flow_index, fte->index); - if (ft->vport) { - MLX5_SET(delete_fte_in, in, vport_number, ft->vport); - MLX5_SET(delete_fte_in, in, other_vport, 1); - } + MLX5_SET(delete_fte_in, in, vport_number, ft->vport); + MLX5_SET(delete_fte_in, in, other_vport, + !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); return mlx5_cmd_exec_in(dev, delete_fte, in); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9fdd99272e31..b899539a0786 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1152,18 +1152,13 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, { return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0); } +EXPORT_SYMBOL(mlx5_create_flow_table); -struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, - int prio, int max_fte, - u32 level, u16 vport) +struct mlx5_flow_table * +mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + struct mlx5_flow_table_attr *ft_attr, u16 vport) { - struct mlx5_flow_table_attr ft_attr = {}; - - ft_attr.max_fte = max_fte; - ft_attr.level = level; - ft_attr.prio = prio; - - return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); + return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport); } struct mlx5_flow_table* @@ -1243,6 +1238,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, return fg; } +EXPORT_SYMBOL(mlx5_create_flow_group); static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) { @@ -2146,6 +2142,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", fg->id); } +EXPORT_SYMBOL(mlx5_destroy_flow_group); struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index afe7f0bffb93..b24a9849c45e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -194,7 +194,7 @@ struct mlx5_ft_underlay_qp { u32 qpn; }; -#define MLX5_FTE_MATCH_PARAM_RESERVED 
reserved_at_a00 +#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00 /* Calculate the fte_match_param length and without the reserved length. * Make sure the reserved field is the last. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index cac8f085b16d..97d96fc38a65 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -39,7 +39,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev, struct mlx5e_priv *priv = mlx5i_epriv(dev); mlx5e_ethtool_get_drvinfo(priv, drvinfo); - strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]", + strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]", sizeof(drvinfo->driver)); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 33081b24f10a..f3d45ef082cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -243,24 +243,30 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) #endif } -static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev) +static void mlx5_lag_add_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) - if (ldev->pf[i].dev) - mlx5_add_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + for (i = 0; i < MLX5_MAX_PORTS; i++) { + if (!ldev->pf[i].dev) + continue; + + ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(ldev->pf[i].dev); + } } -static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev) +static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) { int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) - if (ldev->pf[i].dev) - mlx5_remove_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + for (i = 0; i < MLX5_MAX_PORTS; i++) { + if (!ldev->pf[i].dev) + continue; + + ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(ldev->pf[i].dev); + } } static void mlx5_do_bond(struct mlx5_lag *ldev) @@ -290,20 +296,21 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) #endif if (roce_lag) - mlx5_lag_remove_ib_devices(ldev); + mlx5_lag_remove_devices(ldev); err = mlx5_activate_lag(ldev, &tracker, roce_lag ? 
MLX5_LAG_FLAG_ROCE : MLX5_LAG_FLAG_SRIOV); if (err) { if (roce_lag) - mlx5_lag_add_ib_devices(ldev); + mlx5_lag_add_devices(ldev); return; } if (roce_lag) { - mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_enable_roce(dev1); } } else if (do_bond && __mlx5_lag_is_active(ldev)) { @@ -312,7 +319,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) roce_lag = __mlx5_lag_is_roce(ldev); if (roce_lag) { - mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_disable_roce(dev1); } @@ -321,7 +329,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) return; if (roce_lag) - mlx5_lag_add_ib_devices(ldev); + mlx5_lag_add_devices(ldev); } } @@ -596,6 +604,8 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) if (err) mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", err); + + return; } /* Must be called with intf_mutex held */ @@ -739,24 +749,6 @@ unlock: } EXPORT_SYMBOL(mlx5_lag_get_slave_port); -bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) -{ - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, - priv); - struct mlx5_lag *ldev; - - if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB) - return true; - - ldev = mlx5_lag_dev_get(dev); - if (!ldev || !__mlx5_lag_is_roce(ldev) || - ldev->pf[MLX5_LAG_P1].dev == dev) - return true; - - /* If bonded, we do not add an IB device for PF1. */ - return false; -} - int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8ff207aa1479..c08315b51fd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -50,6 +50,7 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif +#include <linux/version.h> #include <net/devlink.h> #include "mlx5_core.h" #include "lib/eq.h" @@ -76,7 +77,6 @@ MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(DRIVER_VERSION); unsigned int mlx5_core_debug_mask; module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644); @@ -227,13 +227,16 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev) strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_NAME, remaining_size); + strncat(string, KBUILD_MODNAME, remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); strncat(string, ",", remaining_size); remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); - strncat(string, DRIVER_VERSION, remaining_size); + + snprintf(string + strlen(string), remaining_size, "%u.%u.%u", + (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff), + (u16)(LINUX_VERSION_CODE & 0xffff)); /*Send the command*/ MLX5_SET(set_driver_version_in, in, opcode, @@ -309,7 +312,7 @@ static int request_bar(struct pci_dev *pdev) return -ENODEV; } - err = pci_request_regions(pdev, DRIVER_NAME); + err = pci_request_regions(pdev, KBUILD_MODNAME); if (err) dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); @@ -1126,23 +1129,23 @@ static int mlx5_load(struct mlx5_core_dev *dev) 
goto err_sriov; } - err = mlx5_sriov_attach(dev); - if (err) { - mlx5_core_err(dev, "sriov init failed %d\n", err); - goto err_sriov; - } - err = mlx5_ec_init(dev); if (err) { mlx5_core_err(dev, "Failed to init embedded CPU\n"); goto err_ec; } + err = mlx5_sriov_attach(dev); + if (err) { + mlx5_core_err(dev, "sriov init failed %d\n", err); + goto err_sriov; + } + return 0; -err_ec: - mlx5_sriov_detach(dev); err_sriov: + mlx5_ec_cleanup(dev); +err_ec: mlx5_cleanup_fs(dev); err_fs: mlx5_accel_tls_cleanup(dev); @@ -1168,8 +1171,8 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { - mlx5_ec_cleanup(dev); mlx5_sriov_detach(dev); + mlx5_ec_cleanup(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); mlx5_accel_tls_cleanup(dev); @@ -1219,14 +1222,21 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot) err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); if (err) goto err_devlink_reg; - mlx5_register_device(dev); + + err = mlx5_register_device(dev); } else { - mlx5_attach_device(dev); + err = mlx5_attach_device(dev); } + if (err) + goto err_register; + mutex_unlock(&dev->intf_state_mutex); return 0; +err_register: + if (boot) + mlx5_devlink_unregister(priv_to_devlink(dev)); err_devlink_reg: clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); mlx5_unload(dev); @@ -1303,8 +1313,14 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) if (err) goto err_pagealloc_init; + err = mlx5_adev_init(dev); + if (err) + goto err_adev_init; + return 0; +err_adev_init: + mlx5_pagealloc_cleanup(dev); err_pagealloc_init: mlx5_health_cleanup(dev); err_health_init: @@ -1321,6 +1337,7 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; + mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); debugfs_remove_recursive(dev->priv.dbg_root); @@ -1331,7 +1348,6 @@ static void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&dev->intf_state_mutex); } -#define MLX5_IB_MOD "mlx5_ib" static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; @@ -1351,6 +1367,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ? 
MLX5_COREDEV_VF : MLX5_COREDEV_PF; + dev->priv.adev_idx = mlx5_adev_idx_alloc(); + if (dev->priv.adev_idx < 0) + return dev->priv.adev_idx; + err = mlx5_mdev_init(dev, prof_sel); if (err) goto mdev_init_err; @@ -1369,8 +1389,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_load_one; } - request_module_nowait(MLX5_IB_MOD); - err = mlx5_crdump_enable(dev); if (err) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); @@ -1384,6 +1402,7 @@ err_load_one: pci_init_err: mlx5_mdev_uninit(dev); mdev_init_err: + mlx5_adev_idx_free(dev->priv.adev_idx); mlx5_devlink_free(devlink); return err; @@ -1400,6 +1419,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_unload_one(dev, true); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); + mlx5_adev_idx_free(dev->priv.adev_idx); mlx5_devlink_free(devlink); } @@ -1594,6 +1614,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */ { 0, } }; @@ -1613,7 +1634,7 @@ void mlx5_recover_device(struct mlx5_core_dev *dev) } static struct pci_driver mlx5_core_driver = { - .name = DRIVER_NAME, + .name = KBUILD_MODNAME, .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, @@ -1639,6 +1660,9 @@ static int __init init(void) { int err; + WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME), + "mlx5_core name not in sync with kernel module name"); + get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); @@ -1650,7 +1674,11 @@ static int __init init(void) goto err_debug; #ifdef CONFIG_MLX5_CORE_EN - mlx5e_init(); + err = mlx5e_init(); + if (err) { + pci_unregister_driver(&mlx5_core_driver); + goto err_debug; + } #endif return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 8cec85ab419d..0a0302ce7144 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -42,9 +42,6 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/driver.h> -#define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "5.0-0" - extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) 
\ @@ -122,6 +119,10 @@ enum mlx5_semaphore_space_address { int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); +int mlx5_cmd_init(struct mlx5_core_dev *dev); +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_set_state(struct mlx5_core_dev *dev, + enum mlx5_cmdif_state cmdif_state); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); @@ -181,22 +182,20 @@ void mlx5_events_cleanup(struct mlx5_core_dev *dev); void mlx5_events_start(struct mlx5_core_dev *dev); void mlx5_events_stop(struct mlx5_core_dev *dev); -void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv); -void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv); -void mlx5_attach_device(struct mlx5_core_dev *dev); +int mlx5_adev_idx_alloc(void); +void mlx5_adev_idx_free(int idx); +void mlx5_adev_cleanup(struct mlx5_core_dev *dev); +int mlx5_adev_init(struct mlx5_core_dev *dev); + +int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); -bool mlx5_device_registered(struct mlx5_core_dev *dev); -void mlx5_register_device(struct mlx5_core_dev *dev); +int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); -void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol); struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); int mlx5_dev_list_trylock(void); -bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv); - int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); @@ -215,7 +214,7 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw, int mlx5_fw_version_query(struct mlx5_core_dev *dev, u32 *running_ver, u32 *stored_ver); -void mlx5e_init(void); +int mlx5e_init(void); void mlx5e_cleanup(void); static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) @@ -235,7 +234,17 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } -void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); +int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); +static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) +{ + int ret; + + mlx5_dev_list_lock(); + ret = mlx5_rescan_drivers_locked(dev); + mlx5_dev_list_unlock(); + return ret; +} + void mlx5_lag_update(struct mlx5_core_dev *dev); enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4d7f8a357df7..eb956ce904bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -374,7 +374,7 @@ retry: if (func_id) dev->priv.vfs_pages += npages; else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.peer_pf_pages += npages; + dev->priv.host_pf_pages += npages; mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", npages, ec_function, func_id, err); @@ -416,7 +416,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, if 
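The new mlx5_rescan_drivers() helper in mlx5_core.h follows the common locked-wrapper pattern: the _locked variant does the work and expects its caller to hold the device-list lock, while the static inline wrapper takes and drops the lock around it for everyone else. A small sketch of the same shape using a pthread mutex in place of mlx5_dev_list_lock():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int rescan_count;

/* Callers that already hold the list lock use the _locked variant. */
static int rescan_drivers_locked(void)
{
	rescan_count++;		/* placeholder for the real rescan work */
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock, mirroring
 * the new static inline mlx5_rescan_drivers(). */
static int rescan_drivers(void)
{
	int ret;

	pthread_mutex_lock(&dev_list_lock);
	ret = rescan_drivers_locked();
	pthread_mutex_unlock(&dev_list_lock);
	return ret;
}

int main(void)
{
	rescan_drivers();
	printf("rescans: %d\n", rescan_count);
	return 0;
}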
(func_id) dev->priv.vfs_pages -= npages; else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.peer_pf_pages -= npages; + dev->priv.host_pf_pages -= npages; mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n", npages, ec_function, func_id); @@ -523,7 +523,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, if (func_id) dev->priv.vfs_pages -= num_claimed; else if (mlx5_core_is_ecpf(dev) && !ec_function) - dev->priv.peer_pf_pages -= num_claimed; + dev->priv.host_pf_pages -= num_claimed; out_free: kvfree(out); @@ -678,9 +678,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) WARN(dev->priv.vfs_pages, "VFs FW pages counter is %d after reclaiming all pages\n", dev->priv.vfs_pages); - WARN(dev->priv.peer_pf_pages, - "Peer PF FW pages counter is %d after reclaiming all pages\n", - dev->priv.peer_pf_pages); + WARN(dev->priv.host_pf_pages, + "External host PF FW pages counter is %d after reclaiming all pages\n", + dev->priv.host_pf_pages); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c new file mode 100644 index 000000000000..7df11a019df9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved. + * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. + */ + +#include "dr_types.h" + +int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int max_order) +{ + int i; + + buddy->max_order = max_order; + + INIT_LIST_HEAD(&buddy->list_node); + INIT_LIST_HEAD(&buddy->used_list); + INIT_LIST_HEAD(&buddy->hot_list); + + buddy->bitmap = kcalloc(buddy->max_order + 1, + sizeof(*buddy->bitmap), + GFP_KERNEL); + buddy->num_free = kcalloc(buddy->max_order + 1, + sizeof(*buddy->num_free), + GFP_KERNEL); + + if (!buddy->bitmap || !buddy->num_free) + goto err_free_all; + + /* Allocating max_order bitmaps, one for each order */ + + for (i = 0; i <= buddy->max_order; ++i) { + unsigned int size = 1 << (buddy->max_order - i); + + buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL); + if (!buddy->bitmap[i]) + goto err_out_free_each_bit_per_order; + } + + /* In the beginning, we have only one order that is available for + * use (the biggest one), so mark the first bit in both bitmaps. 
+ */ + + bitmap_set(buddy->bitmap[buddy->max_order], 0, 1); + + buddy->num_free[buddy->max_order] = 1; + + return 0; + +err_out_free_each_bit_per_order: + for (i = 0; i <= buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + +err_free_all: + kfree(buddy->num_free); + kfree(buddy->bitmap); + return -ENOMEM; +} + +void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy) +{ + int i; + + list_del(&buddy->list_node); + + for (i = 0; i <= buddy->max_order; ++i) + bitmap_free(buddy->bitmap[i]); + + kfree(buddy->num_free); + kfree(buddy->bitmap); +} + +static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int start_order, + unsigned int *segment, + unsigned int *order) +{ + unsigned int seg, order_iter, m; + + for (order_iter = start_order; + order_iter <= buddy->max_order; ++order_iter) { + if (!buddy->num_free[order_iter]) + continue; + + m = 1 << (buddy->max_order - order_iter); + seg = find_first_bit(buddy->bitmap[order_iter], m); + + if (WARN(seg >= m, + "ICM Buddy: failed finding free mem for order %d\n", + order_iter)) + return -ENOMEM; + + break; + } + + if (order_iter > buddy->max_order) + return -ENOMEM; + + *segment = seg; + *order = order_iter; + return 0; +} + +/** + * mlx5dr_buddy_alloc_mem() - Update second level bitmap. + * @buddy: Buddy to update. + * @order: Order of the buddy to update. + * @segment: Segment number. + * + * This function finds the first area of the ICM memory managed by this buddy. + * It uses the data structures of the buddy system in order to find the first + * area of free place, starting from the current order till the maximum order + * in the system. + * + * Return: 0 when segment is set, non-zero error status otherwise. + * + * The function returns the location (segment) in the whole buddy ICM memory + * area - the index of the memory segment that is available for use. + */ +int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int order, + unsigned int *segment) +{ + unsigned int seg, order_iter; + int err; + + err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter); + if (err) + return err; + + bitmap_clear(buddy->bitmap[order_iter], seg, 1); + --buddy->num_free[order_iter]; + + /* If we found free memory in some order that is bigger than the + * required order, we need to split every order between the required + * order and the order that we found into two parts, and mark accordingly. + */ + while (order_iter > order) { + --order_iter; + seg <<= 1; + bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1); + ++buddy->num_free[order_iter]; + } + + seg <<= order; + *segment = seg; + + return 0; +} + +void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int seg, unsigned int order) +{ + seg >>= order; + + /* Whenever a segment is free, + * the mem is added to the buddy that gave it. 
+ */ + while (test_bit(seg ^ 1, buddy->bitmap[order])) { + bitmap_clear(buddy->bitmap[order], seg ^ 1, 1); + --buddy->num_free[order]; + seg >>= 1; + ++order; + } + bitmap_set(buddy->bitmap[order], seg, 1); + + ++buddy->num_free[order]; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 51bbd88ff021..ba65ec406cfa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -94,12 +94,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev, caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols); caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version); - if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) { + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) { caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0); caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1); } - if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) { + if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) { caps->flex_parser_id_icmpv6_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0); caps->flex_parser_id_icmpv6_dw1 = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c index cc33515b9aba..66c24767e3b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c @@ -4,50 +4,16 @@ #include "dr_types.h" #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64 -#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024) - -struct mlx5dr_icm_pool; - -struct mlx5dr_icm_bucket { - struct mlx5dr_icm_pool *pool; - - /* Chunks that aren't visible to HW not directly and not in cache */ - struct list_head free_list; - unsigned int free_list_count; - - /* Used chunks, HW may be accessing this memory */ - struct list_head used_list; - unsigned int used_list_count; - - /* HW may be accessing this memory but at some future, - * undetermined time, it might cease to do so. Before deciding to call - * sync_ste, this list is moved to sync_list - */ - struct list_head hot_list; - unsigned int hot_list_count; - - /* Pending sync list, entries from the hot list are moved to this list. 
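dr_buddy.c above implements the classic binary-buddy scheme: one free-bitmap per order, where block seg of order o pairs with buddy seg ^ 1; allocation searches upward for the first order with a free block and splits downward, while freeing coalesces upward for as long as the buddy bit is set. A compact userspace re-implementation of that logic, as a sketch only (byte-per-bit maps for clarity rather than the kernel bitmap API):

#include <stdio.h>
#include <stdlib.h>

/* bitmap[o] has one bit per free block of order o; block "seg" of order
 * o pairs with buddy "seg ^ 1". */
#define MAX_ORDER 4				/* manages 2^MAX_ORDER units */

static unsigned char *bitmap[MAX_ORDER + 1];	/* 1 byte per bit, for clarity */
static unsigned int num_free[MAX_ORDER + 1];

static void buddy_init(void)
{
	for (unsigned int o = 0; o <= MAX_ORDER; o++)
		bitmap[o] = calloc(1u << (MAX_ORDER - o), 1);
	bitmap[MAX_ORDER][0] = 1;	/* one free block: the whole area */
	num_free[MAX_ORDER] = 1;
}

static int buddy_alloc(unsigned int order, unsigned int *segment)
{
	unsigned int o, seg = 0;

	for (o = order; o <= MAX_ORDER; o++)	/* first order with a free block */
		if (num_free[o])
			break;
	if (o > MAX_ORDER)
		return -1;

	while (!bitmap[o][seg])			/* find the free segment */
		seg++;
	bitmap[o][seg] = 0;
	num_free[o]--;

	while (o > order) {			/* split, freeing the odd buddy */
		o--;
		seg <<= 1;
		bitmap[o][seg ^ 1] = 1;
		num_free[o]++;
	}
	*segment = seg;
	return 0;
}

static void buddy_free(unsigned int seg, unsigned int order)
{
	while (order < MAX_ORDER && bitmap[order][seg ^ 1]) {
		bitmap[order][seg ^ 1] = 0;	/* merge with the free buddy */
		num_free[order]--;
		seg >>= 1;
		order++;
	}
	bitmap[order][seg] = 1;
	num_free[order]++;
}

int main(void)
{
	unsigned int seg;

	buddy_init();
	if (!buddy_alloc(1, &seg))		/* order-1 block = 2 units */
		printf("got seg %u\n", seg);	/* seg in order-1 units here */
	buddy_free(seg, 1);
	return 0;
}

One difference worth noting: this sketch keeps seg in units of the requested order, whereas mlx5dr_buddy_alloc_mem() shifts it into smallest-entry units (seg <<= order) before returning, which is why mlx5dr_buddy_free_mem() starts with seg >>= order.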
- * sync_ste is executed and then sync_list is concatenated to the free list - */ - struct list_head sync_list; - unsigned int sync_list_count; - - u32 total_chunks; - u32 num_of_entries; - u32 entry_size; - /* protect the ICM bucket */ - struct mutex mutex; -}; +#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024) struct mlx5dr_icm_pool { - struct mlx5dr_icm_bucket *buckets; enum mlx5dr_icm_type icm_type; enum mlx5dr_icm_chunk_size max_log_chunk_sz; - enum mlx5dr_icm_chunk_size num_of_buckets; - struct list_head icm_mr_list; - /* protect the ICM MR list */ - struct mutex mr_mutex; struct mlx5dr_domain *dmn; + /* memory management */ + struct mutex mutex; /* protect the ICM pool and ICM buddy */ + struct list_head buddy_mem_list; + u64 hot_memory_size; }; struct mlx5dr_icm_dm { @@ -58,13 +24,11 @@ struct mlx5dr_icm_dm { }; struct mlx5dr_icm_mr { - struct mlx5dr_icm_pool *pool; struct mlx5_core_mkey mkey; struct mlx5dr_icm_dm dm; - size_t used_length; + struct mlx5dr_domain *dmn; size_t length; u64 icm_start_addr; - struct list_head mr_list; }; static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev, @@ -107,8 +71,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) if (!icm_mr) return NULL; - icm_mr->pool = pool; - INIT_LIST_HEAD(&icm_mr->mr_list); + icm_mr->dmn = pool->dmn; icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, pool->icm_type); @@ -150,8 +113,6 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) goto free_mkey; } - list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list); - return icm_mr; free_mkey: @@ -166,10 +127,9 @@ free_icm_mr: static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) { - struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev; + struct mlx5_core_dev *mdev = icm_mr->dmn->mdev; struct mlx5dr_icm_dm *dm = &icm_mr->dm; - list_del(&icm_mr->mr_list); mlx5_core_destroy_mkey(mdev, &icm_mr->mkey); mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0, dm->addr, dm->obj_id); @@ -178,19 +138,17 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr) static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; - - chunk->ste_arr = kvzalloc(bucket->num_of_entries * + chunk->ste_arr = kvzalloc(chunk->num_of_entries * sizeof(chunk->ste_arr[0]), GFP_KERNEL); if (!chunk->ste_arr) return -ENOMEM; - chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries * + chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries * DR_STE_SIZE_REDUCED, GFP_KERNEL); if (!chunk->hw_ste_arr) goto out_free_ste_arr; - chunk->miss_list = kvmalloc(bucket->num_of_entries * + chunk->miss_list = kvmalloc(chunk->num_of_entries * sizeof(chunk->miss_list[0]), GFP_KERNEL); if (!chunk->miss_list) goto out_free_hw_ste_arr; @@ -204,72 +162,6 @@ out_free_ste_arr: return -ENOMEM; } -static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket) -{ - size_t mr_free_size, mr_req_size, mr_row_size; - struct mlx5dr_icm_pool *pool = bucket->pool; - struct mlx5dr_icm_mr *icm_mr = NULL; - struct mlx5dr_icm_chunk *chunk; - int i, err = 0; - - mr_req_size = bucket->num_of_entries * bucket->entry_size; - mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, - pool->icm_type); - mutex_lock(&pool->mr_mutex); - if (!list_empty(&pool->icm_mr_list)) { - icm_mr = list_last_entry(&pool->icm_mr_list, - struct mlx5dr_icm_mr, mr_list); - - if (icm_mr) - mr_free_size = icm_mr->dm.length - icm_mr->used_length; - } - - if (!icm_mr || mr_free_size < mr_row_size) { - icm_mr = dr_icm_pool_mr_create(pool); - if 
(!icm_mr) { - err = -ENOMEM; - goto out_err; - } - } - - /* Create memory aligned chunks */ - for (i = 0; i < mr_row_size / mr_req_size; i++) { - chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL); - if (!chunk) { - err = -ENOMEM; - goto out_err; - } - - chunk->bucket = bucket; - chunk->rkey = icm_mr->mkey.key; - /* mr start addr is zero based */ - chunk->mr_addr = icm_mr->used_length; - chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length; - icm_mr->used_length += mr_req_size; - chunk->num_of_entries = bucket->num_of_entries; - chunk->byte_size = chunk->num_of_entries * bucket->entry_size; - - if (pool->icm_type == DR_ICM_TYPE_STE) { - err = dr_icm_chunk_ste_init(chunk); - if (err) - goto out_free_chunk; - } - - INIT_LIST_HEAD(&chunk->chunk_list); - list_add(&chunk->chunk_list, &bucket->free_list); - bucket->free_list_count++; - bucket->total_chunks++; - } - mutex_unlock(&pool->mr_mutex); - return 0; - -out_free_chunk: - kvfree(chunk); -out_err: - mutex_unlock(&pool->mr_mutex); - return err; -} - static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) { kvfree(chunk->miss_list); @@ -277,166 +169,199 @@ static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk) kvfree(chunk->ste_arr); } -static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk) +static enum mlx5dr_icm_type +get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk) +{ + return chunk->buddy_mem->pool->icm_type; +} + +static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk, + struct mlx5dr_icm_buddy_mem *buddy) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; + enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk); + buddy->used_memory -= chunk->byte_size; list_del(&chunk->chunk_list); - bucket->total_chunks--; - if (bucket->pool->icm_type == DR_ICM_TYPE_STE) + if (icm_type == DR_ICM_TYPE_STE) dr_icm_chunk_ste_cleanup(chunk); kvfree(chunk); } -static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *bucket, - enum mlx5dr_icm_chunk_size chunk_size) +static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) { - if (pool->icm_type == DR_ICM_TYPE_STE) - bucket->entry_size = DR_STE_SIZE; - else - bucket->entry_size = DR_MODIFY_ACTION_SIZE; - - bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); - bucket->pool = pool; - mutex_init(&bucket->mutex); - INIT_LIST_HEAD(&bucket->free_list); - INIT_LIST_HEAD(&bucket->used_list); - INIT_LIST_HEAD(&bucket->hot_list); - INIT_LIST_HEAD(&bucket->sync_list); + struct mlx5dr_icm_buddy_mem *buddy; + struct mlx5dr_icm_mr *icm_mr; + + icm_mr = dr_icm_pool_mr_create(pool); + if (!icm_mr) + return -ENOMEM; + + buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL); + if (!buddy) + goto free_mr; + + if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz)) + goto err_free_buddy; + + buddy->icm_mr = icm_mr; + buddy->pool = pool; + + /* add it to the -start- of the list in order to search in it first */ + list_add(&buddy->list_node, &pool->buddy_mem_list); + + return 0; + +err_free_buddy: + kvfree(buddy); +free_mr: + dr_icm_pool_mr_destroy(icm_mr); + return -ENOMEM; } -static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket) +static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy) { struct mlx5dr_icm_chunk *chunk, *next; - mutex_destroy(&bucket->mutex); - list_splice_tail_init(&bucket->sync_list, &bucket->free_list); - list_splice_tail_init(&bucket->hot_list, &bucket->free_list); + list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list) + dr_icm_chunk_destroy(chunk, 
buddy); + + list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list) + dr_icm_chunk_destroy(chunk, buddy); - list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list) - dr_icm_chunk_destroy(chunk); + dr_icm_pool_mr_destroy(buddy->icm_mr); - WARN_ON(bucket->total_chunks != 0); + mlx5dr_buddy_cleanup(buddy); - /* Cleanup of unreturned chunks */ - list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list) - dr_icm_chunk_destroy(chunk); + kvfree(buddy); } -static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool) +static struct mlx5dr_icm_chunk * +dr_icm_chunk_create(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + struct mlx5dr_icm_buddy_mem *buddy_mem_pool, + unsigned int seg) { - u64 hot_size = 0; - int chunk_order; + struct mlx5dr_icm_chunk *chunk; + int offset; - for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++) - hot_size += pool->buckets[chunk_order].hot_list_count * - mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type); + chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return NULL; - return hot_size; -} + offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg; + + chunk->rkey = buddy_mem_pool->icm_mr->mkey.key; + chunk->mr_addr = offset; + chunk->icm_addr = + (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset; + chunk->num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); + chunk->byte_size = + mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type); + chunk->seg = seg; + + if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) { + mlx5dr_err(pool->dmn, + "Failed to init ste arrays (order: %d)\n", + chunk_size); + goto out_free_chunk; + } -static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *bucket) -{ - u64 bytes_for_sync; + buddy_mem_pool->used_memory += chunk->byte_size; + chunk->buddy_mem = buddy_mem_pool; + INIT_LIST_HEAD(&chunk->chunk_list); - bytes_for_sync = dr_icm_hot_mem_size(pool); - if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count) - return false; + /* chunk now is part of the used_list */ + list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list); - return true; -} + return chunk; -static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket) -{ - list_splice_tail_init(&bucket->hot_list, &bucket->sync_list); - bucket->sync_list_count += bucket->hot_list_count; - bucket->hot_list_count = 0; +out_free_chunk: + kvfree(chunk); + return NULL; } -static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket) +static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) { - list_splice_tail_init(&bucket->sync_list, &bucket->free_list); - bucket->free_list_count += bucket->sync_list_count; - bucket->sync_list_count = 0; -} + if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL) + return true; -static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket) -{ - list_splice_tail_init(&bucket->sync_list, &bucket->hot_list); - bucket->hot_list_count += bucket->sync_list_count; - bucket->sync_list_count = 0; + return false; } -static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) +static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) { - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - 
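dr_icm_chunk_create() above derives all of a chunk's addresses from the buddy segment alone: the byte offset is the per-entry size for the ICM type times seg, yielding both the zero-based mr_addr and the absolute icm_addr. A sketch of that arithmetic; the 64- and 8-byte entry sizes are illustrative stand-ins for DR_STE_SIZE and DR_MODIFY_ACTION_SIZE, which are defined elsewhere in the driver:

#include <stdint.h>
#include <stdio.h>

enum icm_type { ICM_TYPE_STE, ICM_TYPE_MODIFY_ACTION };

/* Illustrative entry sizes, mirroring
 * mlx5dr_icm_pool_dm_type_to_entry_size(). */
static int entry_size(enum icm_type t)
{
	return t == ICM_TYPE_STE ? 64 : 8;
}

struct chunk {
	uint64_t mr_addr;	/* offset inside the MR (zero-based) */
	uint64_t icm_addr;	/* absolute address inside the ICM area */
};

static struct chunk chunk_at(uint64_t icm_start, enum icm_type t,
			     unsigned int seg)
{
	uint64_t off = (uint64_t)entry_size(t) * seg;	/* seg -> bytes */

	return (struct chunk){ .mr_addr = off, .icm_addr = icm_start + off };
}

int main(void)
{
	struct chunk c = chunk_at(0x100000, ICM_TYPE_STE, 12);

	printf("mr_addr=0x%llx icm_addr=0x%llx\n",
	       (unsigned long long)c.mr_addr, (unsigned long long)c.icm_addr);
	return 0;
}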
dr_icm_chill_bucket_start(bucket); - continue; - } + struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy; + int err; - /* Freeing the mutex is done at the end of that process, after - * sync_ste was executed at dr_icm_chill_buckets_end func. - */ - if (mutex_trylock(&bucket->mutex)) { - dr_icm_chill_bucket_start(bucket); - buckets[i] = true; - } + err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); + if (err) { + mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err); + return err; } -} -static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) -{ - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - dr_icm_chill_bucket_end(bucket); - continue; - } + list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) { + struct mlx5dr_icm_chunk *chunk, *tmp_chunk; - if (!buckets[i]) - continue; + list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) { + mlx5dr_buddy_free_mem(buddy, chunk->seg, + ilog2(chunk->num_of_entries)); + pool->hot_memory_size -= chunk->byte_size; + dr_icm_chunk_destroy(chunk, buddy); + } - dr_icm_chill_bucket_end(bucket); - mutex_unlock(&bucket->mutex); + if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE) + dr_icm_buddy_destroy(buddy); } + + return 0; } -static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool, - struct mlx5dr_icm_bucket *cb, - bool buckets[DR_CHUNK_SIZE_MAX]) +static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool, + enum mlx5dr_icm_chunk_size chunk_size, + struct mlx5dr_icm_buddy_mem **buddy, + unsigned int *seg) { - struct mlx5dr_icm_bucket *bucket; - int i; - - for (i = 0; i < pool->num_of_buckets; i++) { - bucket = &pool->buckets[i]; - if (bucket == cb) { - dr_icm_chill_bucket_abort(bucket); - continue; - } + struct mlx5dr_icm_buddy_mem *buddy_mem_pool; + bool new_mem = false; + int err; - if (!buckets[i]) - continue; +alloc_buddy_mem: + /* find the next free place from the buddy list */ + list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) { + err = mlx5dr_buddy_alloc_mem(buddy_mem_pool, + chunk_size, seg); + if (!err) + goto found; + + if (WARN_ON(new_mem)) { + /* We have new memory pool, first in the list */ + mlx5dr_err(pool->dmn, + "No memory for order: %d\n", + chunk_size); + goto out; + } + } - dr_icm_chill_bucket_abort(bucket); - mutex_unlock(&bucket->mutex); + /* no more available allocators in that pool, create new */ + err = dr_icm_buddy_create(pool); + if (err) { + mlx5dr_err(pool->dmn, + "Failed creating buddy for order %d\n", + chunk_size); + goto out; } + + /* mark we have new memory, first in list */ + new_mem = true; + goto alloc_buddy_mem; + +found: + *buddy = buddy_mem_pool; +out: + return err; } /* Allocate an ICM chunk, each chunk holds a piece of ICM memory and @@ -446,68 +371,48 @@ struct mlx5dr_icm_chunk * mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size) { - struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */ - bool buckets[DR_CHUNK_SIZE_MAX] = {}; - struct mlx5dr_icm_bucket *bucket; - int err; + struct mlx5dr_icm_chunk *chunk = NULL; + struct mlx5dr_icm_buddy_mem *buddy; + unsigned int seg; + int ret; if (chunk_size > pool->max_log_chunk_sz) return NULL; - bucket = &pool->buckets[chunk_size]; - - mutex_lock(&bucket->mutex); - - /* Take chunk from pool if available, otherwise allocate new chunks */ - if 
(list_empty(&bucket->free_list)) { - if (dr_icm_reuse_hot_entries(pool, bucket)) { - dr_icm_chill_buckets_start(pool, bucket, buckets); - err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); - if (err) { - dr_icm_chill_buckets_abort(pool, bucket, buckets); - mlx5dr_err(pool->dmn, "Sync_steering failed\n"); - chunk = NULL; - goto out; - } - dr_icm_chill_buckets_end(pool, bucket, buckets); - } else { - dr_icm_chunks_create(bucket); - } - } + mutex_lock(&pool->mutex); + /* find mem, get back the relevant buddy pool and seg in that mem */ + ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg); + if (ret) + goto out; - if (!list_empty(&bucket->free_list)) { - chunk = list_last_entry(&bucket->free_list, - struct mlx5dr_icm_chunk, - chunk_list); - if (chunk) { - list_del_init(&chunk->chunk_list); - list_add_tail(&chunk->chunk_list, &bucket->used_list); - bucket->free_list_count--; - bucket->used_list_count++; - } - } + chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg); + if (!chunk) + goto out_err; + + goto out; + +out_err: + mlx5dr_buddy_free_mem(buddy, seg, chunk_size); out: - mutex_unlock(&bucket->mutex); + mutex_unlock(&pool->mutex); return chunk; } void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk) { - struct mlx5dr_icm_bucket *bucket = chunk->bucket; + struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem; + struct mlx5dr_icm_pool *pool = buddy->pool; - if (bucket->pool->icm_type == DR_ICM_TYPE_STE) { - memset(chunk->ste_arr, 0, - bucket->num_of_entries * sizeof(chunk->ste_arr[0])); - memset(chunk->hw_ste_arr, 0, - bucket->num_of_entries * DR_STE_SIZE_REDUCED); - } + /* move the memory to the waiting list AKA "hot" */ + mutex_lock(&pool->mutex); + list_move_tail(&chunk->chunk_list, &buddy->hot_list); + pool->hot_memory_size += chunk->byte_size; + + /* Check if we have chunks that are waiting for sync-ste */ + if (dr_icm_pool_is_sync_required(pool)) + dr_icm_pool_sync_all_buddy_pools(pool); - mutex_lock(&bucket->mutex); - list_del_init(&chunk->chunk_list); - list_add_tail(&chunk->chunk_list, &bucket->hot_list); - bucket->hot_list_count++; - bucket->used_list_count--; - mutex_unlock(&bucket->mutex); + mutex_unlock(&pool->mutex); } struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, @@ -515,7 +420,6 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, { enum mlx5dr_icm_chunk_size max_log_chunk_sz; struct mlx5dr_icm_pool *pool; - int i; if (icm_type == DR_ICM_TYPE_STE) max_log_chunk_sz = dmn->info.max_log_sw_icm_sz; @@ -526,43 +430,24 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, if (!pool) return NULL; - pool->buckets = kcalloc(max_log_chunk_sz + 1, - sizeof(pool->buckets[0]), - GFP_KERNEL); - if (!pool->buckets) - goto free_pool; - pool->dmn = dmn; pool->icm_type = icm_type; pool->max_log_chunk_sz = max_log_chunk_sz; - pool->num_of_buckets = max_log_chunk_sz + 1; - INIT_LIST_HEAD(&pool->icm_mr_list); - for (i = 0; i < pool->num_of_buckets; i++) - dr_icm_bucket_init(pool, &pool->buckets[i], i); + INIT_LIST_HEAD(&pool->buddy_mem_list); - mutex_init(&pool->mr_mutex); + mutex_init(&pool->mutex); return pool; - -free_pool: - kvfree(pool); - return NULL; } void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool) { - struct mlx5dr_icm_mr *icm_mr, *next; - int i; - - mutex_destroy(&pool->mr_mutex); - - list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list) - dr_icm_pool_mr_destroy(icm_mr); + struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy; - for (i = 0; i < pool->num_of_buckets; i++) 
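The reworked free path never returns memory to the buddy directly: mlx5dr_icm_free_chunk() only moves the chunk to its buddy's hot list and grows pool->hot_memory_size, and once that crosses DR_ICM_SYNC_THRESHOLD_POOL a single sync_steering command flushes the hardware and every hot chunk is reclaimed at once. A minimal sketch of that batched, threshold-driven reclaim:

#include <stdio.h>

#define SYNC_THRESHOLD (64 * 1024 * 1024)	/* DR_ICM_SYNC_THRESHOLD_POOL */

static unsigned long long hot_memory_size;
static unsigned int hot_chunks;

/* Stand-in for mlx5dr_cmd_sync_steering() plus returning all hot chunks
 * to the buddy allocator. */
static void sync_and_reclaim(void)
{
	printf("sync: reclaiming %u chunks (%llu bytes)\n",
	       hot_chunks, hot_memory_size);
	hot_chunks = 0;
	hot_memory_size = 0;
}

/* Freed chunks are only "hot": HW may still read them until a sync, so
 * they are batched and reclaimed once enough memory is waiting. */
static void free_chunk(unsigned long long byte_size)
{
	hot_chunks++;
	hot_memory_size += byte_size;

	if (hot_memory_size > SYNC_THRESHOLD)
		sync_and_reclaim();
}

int main(void)
{
	for (int i = 0; i < 70; i++)
		free_chunk(1024 * 1024);	/* 70 MiB of 1 MiB chunks */
	return 0;
}

Batching the expensive sync this way trades a bounded amount of "dead" memory for far fewer device commands than freeing each chunk eagerly would cost.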
- dr_icm_bucket_cleanup(&pool->buckets[i]); + list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) + dr_icm_buddy_destroy(buddy); - kfree(pool->buckets); + mutex_destroy(&pool->mutex); kvfree(pool); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 7df883686d46..6527eb4df153 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -85,7 +85,7 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec) (_misc2)._inner_outer##_first_mpls_s_bos || \ (_misc2)._inner_outer##_first_mpls_ttl) -static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) +static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc) { return (misc->gre_key_h || misc->gre_key_l || misc->gre_protocol || misc->gre_c_present || @@ -98,12 +98,12 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc) (_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \ (_misc2).outer_first_mpls_over_##gre_udp##_ttl) -#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \ +#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \ DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp)) static bool -dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) +dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) { return (misc3->outer_vxlan_gpe_vni || misc3->outer_vxlan_gpe_next_protocol || @@ -111,21 +111,20 @@ dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3) } static bool -dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps) +dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & - MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; + return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED; } static bool -dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask, - struct mlx5dr_domain *dmn) +dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) { - return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) && - dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps); + return dr_mask_is_vxlan_gpe_set(&mask->misc3) && + dr_matcher_supp_vxlan_gpe(&dmn->info.caps); } -static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc) +static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc) { return misc->geneve_vni || misc->geneve_oam || @@ -134,26 +133,46 @@ static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc) } static bool -dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps) +dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps) { - return caps->flex_protocols & - MLX5_FLEX_PARSER_GENEVE_ENABLED; + return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED; } static bool -dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask, - struct mlx5dr_domain *dmn) +dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) { - return dr_mask_is_misc_geneve_set(&mask->misc) && - dr_matcher_supp_flex_parser_geneve(&dmn->info.caps); + return dr_mask_is_tnl_geneve_set(&mask->misc) && + dr_matcher_supp_tnl_geneve(&dmn->info.caps); } -static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3) +static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED; +} + +static int 
dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps) +{ + return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; +} + +static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3) { return (misc3->icmpv6_type || misc3->icmpv6_code || misc3->icmpv6_header_data); } +static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask, + struct mlx5dr_domain *dmn) +{ + if (DR_MASK_IS_ICMPV4_SET(&mask->misc3)) + return dr_matcher_supp_icmp_v4(&dmn->info.caps); + else if (dr_mask_is_icmpv6_set(&mask->misc3)) + return dr_matcher_supp_icmp_v6(&dmn->info.caps); + + return false; +} + static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2) { return misc2->metadata_reg_a; @@ -257,7 +276,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_smac_set(&mask.outer) && dr_mask_is_dmac_set(&mask.outer)) { - mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask, + mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask, inner, rx); } @@ -277,8 +296,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer)) - mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.outer)) mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, @@ -289,14 +308,12 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, inner, rx); } - if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn)) - mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++], - &mask, - inner, rx); - else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn)) - mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++], - &mask, - inner, rx); + if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn)) + mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask, + inner, rx); + else if (dr_mask_is_tnl_geneve(&mask, dmn)) + mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask, + inner, rx); if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer)) mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx); @@ -304,22 +321,18 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer)) mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) - mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, - inner, rx); + if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) + mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); - if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) && - mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) || - (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) && - mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) { - ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++], - &mask, &dmn->info.caps, - inner, rx); + if (dr_mask_is_icmp(&mask, dmn)) { + ret = mlx5dr_ste_build_icmp(&sb[idx++], + &mask, &dmn->info.caps, + inner, rx); if (ret) return ret; } - if (dr_mask_is_gre_set(&mask.misc)) - mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx); + if (dr_mask_is_tnl_gre_set(&mask.misc)) + mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx); } /* Inner */ @@ -334,7 +347,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (dr_mask_is_smac_set(&mask.inner) && dr_mask_is_dmac_set(&mask.inner)) { - mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], + mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask, inner, rx); } @@ -354,8 +367,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher 
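dr_mask_is_icmp() above folds the old per-caller capability tests into one predicate: a mask only qualifies as an ICMP match if it sets ICMPv4 or ICMPv6 fields and the device's flex parser advertises the corresponding MLX5_FLEX_PARSER_*_ENABLED bit. A sketch of the shape of that check; the bit positions below are illustrative assumptions, not values from the mlx5 headers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit positions standing in for the real
 * MLX5_FLEX_PARSER_ICMP_V4/V6_ENABLED flags. */
#define FLEX_PARSER_ICMP_V4_ENABLED (1u << 8)
#define FLEX_PARSER_ICMP_V6_ENABLED (1u << 9)

struct caps { unsigned int flex_protocols; };
struct mask { bool icmpv4_set, icmpv6_set; };

/* Mirrors the shape of dr_mask_is_icmp(): only claim support when the
 * mask actually asks for ICMP and the device parser can provide it. */
static bool mask_is_icmp(const struct mask *m, const struct caps *c)
{
	if (m->icmpv4_set)
		return c->flex_protocols & FLEX_PARSER_ICMP_V4_ENABLED;
	if (m->icmpv6_set)
		return c->flex_protocols & FLEX_PARSER_ICMP_V6_ENABLED;
	return false;
}

int main(void)
{
	struct caps c = { .flex_protocols = FLEX_PARSER_ICMP_V4_ENABLED };
	struct mask m = { .icmpv4_set = true };

	printf("icmp matchable: %d\n", mask_is_icmp(&m, &c));
	return 0;
}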
*matcher, inner, rx); if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner)) - mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask, - inner, rx); + mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask, + inner, rx); } else { if (dr_mask_is_ipv4_5_tuple_set(&mask.inner)) mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask, @@ -372,8 +385,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner)) mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx); - if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2)) - mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx); + if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2)) + mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx); } /* Empty matcher, takes all */ if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) @@ -630,7 +643,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher, } if (mask) { - if (mask->match_sz > sizeof(struct mlx5dr_match_param)) { + if (mask->match_sz > DR_SZ_MATCH_PARAM) { mlx5dr_err(dmn, "Invalid match size attribute\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index b3c9dc032026..6d73719db1f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -874,8 +874,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher, u32 s_idx, e_idx; if (!value_size || - (value_size > sizeof(struct mlx5dr_match_param) || - (value_size % sizeof(u32)))) { + (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) { mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n"); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index b01aaec75622..d275823bff2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -1090,7 +1090,7 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx) { @@ -1594,9 +1594,9 @@ static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask); @@ -1693,8 +1693,8 @@ static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, bool inner, bool rx) +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, bool inner, bool rx) { dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask); @@ -1771,9 +1771,9 @@ static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_0_bit_mask(mask, inner, 
sb->bit_mask); @@ -1792,8 +1792,8 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask, struct mlx5dr_cmd_caps *caps, u8 *bit_mask) { + bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3); struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3; - bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask); u32 icmp_header_data_mask; u32 icmp_type_mask; u32 icmp_code_mask; @@ -1869,7 +1869,7 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value, u32 icmp_code; bool is_ipv4; - is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3); + is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3); if (is_ipv4) { icmp_header_data = misc_3->icmpv4_header_data; icmp_type = misc_3->icmpv4_type; @@ -1928,10 +1928,10 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value, return 0; } -int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx) +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx) { int ret; @@ -2069,9 +2069,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner, sb->bit_mask); @@ -2122,9 +2122,9 @@ dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value, return 0; } -void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx) +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx) { dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask); sb->rx = rx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index cf62ea4f882e..51880df26724 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -17,6 +17,7 @@ #define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 +#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg) #define mlx5dr_info(dmn, arg...) 
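Both dr_matcher_init() and dr_rule_verify() now bound caller-supplied match buffers by the new DR_SZ_MATCH_PARAM — the device's match-parameter layout in bytes, i.e. its dword count times four — instead of the host-side struct size. A sketch of the validation; the dword count below is an illustrative assumption, not the real MLX5_ST_SZ_DW_MATCH_PARAM value:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ST_SZ_DW_MATCH_PARAM 0xa0		/* illustrative dword count */
#define SZ_MATCH_PARAM (ST_SZ_DW_MATCH_PARAM * 4)

/* Same checks as dr_rule_verify(): non-empty, bounded by the device's
 * match-parameter layout, and a whole number of 32-bit words. */
static bool match_size_ok(size_t value_size)
{
	return value_size &&
	       value_size <= SZ_MATCH_PARAM &&
	       !(value_size % sizeof(uint32_t));
}

int main(void)
{
	printf("%d %d %d\n", match_size_ok(64), match_size_ok(3),
	       match_size_ok(SZ_MATCH_PARAM + 4));	/* 1 0 0 */
	return 0;
}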
mlx5_core_info((dmn)->mdev, ##arg) @@ -114,7 +115,7 @@ enum mlx5dr_ipv { struct mlx5dr_icm_pool; struct mlx5dr_icm_chunk; -struct mlx5dr_icm_bucket; +struct mlx5dr_icm_buddy_mem; struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; @@ -288,7 +289,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, struct mlx5dr_match_param *value, u8 *ste_arr); -void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder, +void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder, struct mlx5dr_match_param *mask, bool inner, bool rx); void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb, @@ -312,31 +313,31 @@ void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb, void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb, +void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); +int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + struct mlx5dr_cmd_caps *caps, + bool inner, bool rx); +void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); -int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - struct mlx5dr_cmd_caps *caps, - bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); -void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb, - struct mlx5dr_match_param *mask, - bool inner, bool rx); +void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb, + struct mlx5dr_match_param *mask, + bool inner, bool rx); void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb, struct mlx5dr_match_param *mask, bool inner, bool rx); @@ -588,9 +589,9 @@ struct mlx5dr_match_param { struct mlx5dr_match_misc3 misc3; }; -#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ - (_misc3)->icmpv4_code || \ - (_misc3)->icmpv4_header_data) +#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \ + (_misc3)->icmpv4_code || \ + (_misc3)->icmpv4_header_data) struct mlx5dr_esw_caps { u64 drop_icm_address_rx; @@ -732,7 +733,6 @@ struct mlx5dr_action { struct mlx5dr_domain *dmn; struct mlx5dr_icm_chunk *chunk; u8 *data; - u32 data_size; u16 num_of_actions; u32 index; u8 allow_rx:1; @@ -805,7 +805,7 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, struct mlx5dr_ste *ste); struct mlx5dr_icm_chunk { - struct mlx5dr_icm_bucket *bucket; + struct mlx5dr_icm_buddy_mem *buddy_mem; struct list_head chunk_list; u32 
rkey; u32 num_of_entries; @@ -813,6 +813,11 @@ struct mlx5dr_icm_chunk { u64 icm_addr; u64 mr_addr; + /* indicates the index of this chunk in the whole memory, + * used for deleting the chunk from the buddy + */ + unsigned int seg; + /* Memory optimisation */ struct mlx5dr_ste *ste_arr; u8 *hw_ste_arr; @@ -841,23 +846,20 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn) mlx5dr_domain_nic_unlock(&dmn->info.rx); } -static inline int -mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps) -{ - return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED; -} - -static inline int -mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps) -{ - return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED; -} - int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_matcher_rx_tx *nic_matcher, enum mlx5dr_ipv outer_ipv, enum mlx5dr_ipv inner_ipv); +static inline int +mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type) +{ + if (icm_type == DR_ICM_TYPE_STE) + return DR_STE_SIZE; + + return DR_MODIFY_ACTION_SIZE; +} + static inline u32 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size) { @@ -871,11 +873,7 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, int num_of_entries; int entry_size; - if (icm_type == DR_ICM_TYPE_STE) - entry_size = DR_STE_SIZE; - else - entry_size = DR_MODIFY_ACTION_SIZE; - + entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type); num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size); return entry_size * num_of_entries; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 7914fe3fc68d..4177786b8eaf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -127,4 +127,36 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev) return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner); } +/* buddy functions & structure */ + +struct mlx5dr_icm_mr; + +struct mlx5dr_icm_buddy_mem { + unsigned long **bitmap; + unsigned int *num_free; + u32 max_order; + struct list_head list_node; + struct mlx5dr_icm_mr *icm_mr; + struct mlx5dr_icm_pool *pool; + + /* This is the list of used chunks. HW may be accessing this memory */ + struct list_head used_list; + u64 used_memory; + + /* Hardware may be accessing this memory but at some future, + * undetermined time, it might cease to do so. + * sync_ste command sets them free. 
+ */ + struct list_head hot_list; +}; + +int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int max_order); +void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy); +int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int order, + unsigned int *segment); +void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy, + unsigned int seg, unsigned int order); + #endif /* _MLX5DR_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index bcd166911d44..46245e0b2462 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -368,7 +368,6 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev, } mlxfw_info(mlxfw_dev, "Initialize firmware flash process\n"); - devlink_flash_update_begin_notify(mlxfw_dev->devlink); mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process", NULL, 0, 0); err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle); @@ -417,7 +416,6 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev, mlxfw_info(mlxfw_dev, "Firmware flash done\n"); mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0); mlxfw_mfa2_file_fini(mfa2_file); - devlink_flash_update_end_notify(mlxfw_dev->devlink); return 0; err_state_wait_activate_to_locked: @@ -429,7 +427,6 @@ err_state_wait_idle_to_locked: mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle); err_fsm_lock: mlxfw_mfa2_file_fini(mfa2_file); - devlink_flash_update_end_notify(mlxfw_dev->devlink); return err; } EXPORT_SYMBOL(mlxfw_firmware_flash); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 892724380ea2..f545fd2c5896 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -15,6 +15,7 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o spectrum_router.o \ + spectrum_router_xm.o \ spectrum1_kvdl.o spectrum2_kvdl.o \ spectrum_kvdl.o \ spectrum_acl_tcam.o spectrum_acl_ctcam.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 5ffdfb532cb7..392ce3cb27f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -343,6 +343,23 @@ static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core, 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE); } +/* cmd_mbox_xm_num_local_ports + * Number of local_ports connected to the xm. + * Each local port is a 4x + * Spectrum-2/3: 25G + * Spectrum-4: 50G + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, xm_num_local_ports, 0x00, 4, 3); + +/* cmd_mbox_xm_exists + * An XM (eXtended Mezzanine, e.g. used for the XLT) is connected on the board. + */ +MLXSW_ITEM32(cmd_mbox, boardinfo, xm_exists, 0x00, 0, 1); + +/* cmd_mbox_xm_local_port_entry + */ +MLXSW_ITEM_BIT_ARRAY(cmd_mbox, boardinfo, xm_local_port_entry, 0x04, 4, 8); + /* cmd_mbox_boardinfo_intapin * When PCIe interrupt messages are being used, this value is used for clearing * an interrupt. When using MSI-X, this register is not used. @@ -657,6 +674,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1); +/* cmd_mbox_config_set_kvh_xlt_cache_mode + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents.
+ */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvh_xlt_cache_mode, 0x08, 3, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -783,6 +806,13 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16); */ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); +/* cmd_mbox_config_profile_kvh_xlt_cache_mode + * KVH XLT cache mode: + * 0 - XLT can use all KVH as best-effort + * 1 - XLT cache uses 1/2 KVH + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvh_xlt_cache_mode, 0x50, 8, 4); + /* cmd_mbox_config_kvd_linear_size * KVD Linear Size * Valid for Spectrum only diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 1a86535c4968..685037e052af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -160,6 +160,7 @@ struct mlxsw_rx_listener_item { struct mlxsw_event_listener_item { struct list_head list; + struct mlxsw_core *mlxsw_core; struct mlxsw_event_listener el; void *priv; }; @@ -1117,16 +1118,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core, struct devlink_flash_update_params *params, struct netlink_ext_ack *extack) { - const struct firmware *firmware; - int err; - - err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev); - if (err) - return err; - err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack); - release_firmware(firmware); - - return err; + return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack); } static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, @@ -2180,11 +2172,16 @@ static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port, void *priv) { struct mlxsw_event_listener_item *event_listener_item = priv; + struct mlxsw_core *mlxsw_core; struct mlxsw_reg_info reg; char *payload; char *reg_tlv; char *op_tlv; + mlxsw_core = event_listener_item->mlxsw_core; + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, + skb->data, skb->len); + mlxsw_emad_tlv_parse(skb); op_tlv = mlxsw_emad_op_tlv(skb); reg_tlv = mlxsw_emad_reg_tlv(skb); @@ -2234,6 +2231,7 @@ int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core, el_item = kmalloc(sizeof(*el_item), GFP_KERNEL); if (!el_item) return -ENOMEM; + el_item->mlxsw_core = mlxsw_core; el_item->el = *el; el_item->priv = priv; @@ -2858,6 +2856,18 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get); +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port) +{ + const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info; + int i; + + for (i = 0; i < bus_info->xm_local_ports_count; i++) + if (bus_info->xm_local_ports[i] == local_port) + return true; + return false; +} +EXPORT_SYMBOL(mlxsw_core_port_is_xm); + struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) { return mlxsw_core->env; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 92f7398287be..6b3ccbf6b238 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -223,6 +223,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, struct devlink_port * mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u8 local_port); +bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 
local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core); int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module); @@ -255,7 +256,8 @@ struct mlxsw_config_profile { used_max_pkey:1, used_ar_sec:1, used_adaptive_routing_group_cap:1, - used_kvd_sizes:1; + used_kvd_sizes:1, + used_kvh_xlt_cache_mode:1; u8 max_vepa_channels; u16 max_mid; u16 max_pgt; @@ -277,6 +279,7 @@ struct mlxsw_config_profile { u32 kvd_linear_size; u8 kvd_hash_single_parts; u8 kvd_hash_double_parts; + u8 kvh_xlt_cache_mode; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -435,6 +438,8 @@ struct mlxsw_fw_rev { u16 can_reset_minor; }; +#define MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX 4 + struct mlxsw_bus_info { const char *device_kind; const char *device_name; @@ -443,7 +448,10 @@ struct mlxsw_bus_info { u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN]; u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; u8 low_frequency:1, - read_frc_capable:1; + read_frc_capable:1, + xm_exists:1; + u8 xm_local_ports_count; + u8 xm_local_ports[MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX]; }; struct mlxsw_hwmon; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 9f6905fa6b47..f1b09c2f9eda 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -133,10 +133,8 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk, } struct mlxsw_afk_picker { - struct { - DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); - unsigned int total; - } hits[0]; + DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX); + unsigned int total; }; static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, @@ -154,8 +152,8 @@ static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk, elinst = &block->instances[j]; if (elinst->element == element) { - __set_bit(element, picker->hits[i].element); - picker->hits[i].total++; + __set_bit(element, picker[i].element); + picker[i].total++; } } } @@ -169,13 +167,13 @@ static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk, int i; int j; - memcpy(&hits_element, &picker->hits[block_index].element, + memcpy(&hits_element, &picker[block_index].element, sizeof(hits_element)); for (i = 0; i < mlxsw_afk->blocks_count; i++) { for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) { - if (__test_and_clear_bit(j, picker->hits[i].element)) - picker->hits[i].total--; + if (__test_and_clear_bit(j, picker[i].element)) + picker[i].total--; } } } @@ -188,8 +186,8 @@ static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk, int i; for (i = 0; i < mlxsw_afk->blocks_count; i++) { - if (picker->hits[i].total > most_hits) { - most_hits = picker->hits[i].total; + if (picker[i].total > most_hits) { + most_hits = picker[i].total; most_index = i; } } @@ -206,7 +204,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk, if (key_info->blocks_count == mlxsw_afk->max_blocks) return -EINVAL; - for_each_set_bit(element, picker->hits[block_index].element, + for_each_set_bit(element, picker[block_index].element, MLXSW_AFK_ELEMENT_MAX) { key_info->element_to_block[element] = key_info->blocks_count; mlxsw_afk_element_usage_add(&key_info->elusage, element); @@ -224,11 +222,9 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk, { struct mlxsw_afk_picker *picker; enum mlxsw_afk_element element; - size_t alloc_size; int err; - alloc_size = 
sizeof(picker->hits[0]) * mlxsw_afk->blocks_count; - picker = kzalloc(alloc_size, GFP_KERNEL); + picker = kcalloc(mlxsw_afk->blocks_count, sizeof(*picker), GFP_KERNEL); if (!picker) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index 8e36a2634ef5..2b23f8a87862 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -4,6 +4,9 @@ #ifndef _MLXSW_CORE_ENV_H #define _MLXSW_CORE_ENV_H +struct ethtool_modinfo; +struct ethtool_eeprom; + int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, int off, int *temp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index c010db2c9dba..b34c44723f8b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -291,7 +291,8 @@ static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) /* Create port objects for each valid entry */ for (i = 0; i < mlxsw_m->max_ports; i++) { - if (mlxsw_m->module_to_port[i] > 0) { + if (mlxsw_m->module_to_port[i] > 0 && + !mlxsw_core_port_is_xm(mlxsw_m->core, i)) { err = mlxsw_m_port_create(mlxsw_m, mlxsw_m->module_to_port[i], i); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 641cdd81882b..4eeae8d78006 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1196,6 +1196,12 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox, MLXSW_RES_GET(res, KVD_DOUBLE_SIZE)); } + if (profile->used_kvh_xlt_cache_mode) { + mlxsw_cmd_mbox_config_profile_set_kvh_xlt_cache_mode_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvh_xlt_cache_mode_set( + mbox, profile->kvh_xlt_cache_mode); + } for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, @@ -1209,6 +1215,30 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); } +static int mlxsw_pci_boardinfo_xm_process(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_bus_info *bus_info, + char *mbox) +{ + int count = mlxsw_cmd_mbox_boardinfo_xm_num_local_ports_get(mbox); + int i; + + if (!mlxsw_cmd_mbox_boardinfo_xm_exists_get(mbox)) + return 0; + + bus_info->xm_exists = true; + + if (count > MLXSW_BUS_INFO_XM_LOCAL_PORTS_MAX) { + dev_err(&mlxsw_pci->pdev->dev, "Invalid number of XM local ports\n"); + return -EINVAL; + } + bus_info->xm_local_ports_count = count; + for (i = 0; i < count; i++) + bus_info->xm_local_ports[i] = + mlxsw_cmd_mbox_boardinfo_xm_local_port_entry_get(mbox, + i); + return 0; +} + static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox) { struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info; @@ -1220,7 +1250,8 @@ static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox) return err; mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd); mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid); - return 0; + + return mlxsw_pci_boardinfo_xm_process(mlxsw_pci, bus_info, mbox); } static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 39eff6a57ba2..16e2df6ef2f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ 
b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -581,6 +581,13 @@ mlxsw_reg_sfd_uc_tunnel_pack(char *payload, int rec_index, mlxsw_reg_sfd_uc_tunnel_protocol_set(payload, rec_index, proto); } +enum mlxsw_reg_tunnel_port { + MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_TUNNEL_PORT_VPLS, + MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL0, + MLXSW_REG_TUNNEL_PORT_FLEX_TUNNEL1, +}; + /* SFN - Switch FDB Notification Register * ------------------------------------------- * The switch provides notifications on newly learned FDB entries and @@ -738,13 +745,6 @@ MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27, MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0, 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false); -enum mlxsw_reg_sfn_tunnel_port { - MLXSW_REG_SFN_TUNNEL_PORT_NVE, - MLXSW_REG_SFN_TUNNEL_PORT_VPLS, - MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_sfn_uc_tunnel_port * Tunnel port. * Reserved on Spectrum. * Access: RO */ @@ -821,8 +821,16 @@ static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, MLXSW_REG_DEFINE(spvid, MLXSW_REG_SPVID_ID, MLXSW_REG_SPVID_LEN); +/* reg_spvid_tport + * Port is tunnel port. + * Reserved when SwitchX/-2 or Spectrum-1. + * Access: Index + */ +MLXSW_ITEM32(reg, spvid, tport, 0x00, 24, 1); + /* reg_spvid_local_port - * Local port number. + * When tport = 0: Local port number. Not supported for CPU port. + * When tport = 1: Tunnel port. * Access: Index */ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); @@ -834,17 +842,30 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); +/* reg_spvid_et_vlan + * EtherType used when a VLAN is pushed at ingress (for untagged + * packets or for QinQ push mode). + * 0: ether_type0 - (default) + * 1: ether_type1 + * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2 + * Ethertype IDs are configured by SVER. + * Access: RW + */ +MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2); + /* reg_spvid_pvid * Port default VID * Access: RW */ MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); -static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) +static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid, + u8 et_vlan) { MLXSW_REG_ZERO(spvid, payload); mlxsw_reg_spvid_local_port_set(payload, local_port); mlxsw_reg_spvid_pvid_set(payload, pvid); + mlxsw_reg_spvid_et_vlan_set(payload, et_vlan); }
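+/* Editor's aside -- a minimal caller sketch for the extended pack helper
+ * (illustrative only, not part of this patch; mlxsw_sp and local_port are
+ * assumed from the usual driver context). Passing et_vlan = 0 selects
+ * ether_type0, the 802.1q EtherType index configured via SVER:
+ *
+ *	char spvid_pl[MLXSW_REG_SPVID_LEN];
+ *
+ *	mlxsw_reg_spvid_pack(spvid_pl, local_port, 1, 0);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+ */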
/* SPVM - Switch Port VLAN Membership @@ -1680,6 +1701,109 @@ static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, mlxsw_reg_svfa_vid_set(payload, vid); } +/* SPVTR - Switch Port VLAN Stacking Register + * ------------------------------------------ + * The Switch Port VLAN Stacking register configures the VLAN mode of the port + * to enable VLAN stacking. + */ +#define MLXSW_REG_SPVTR_ID 0x201D +#define MLXSW_REG_SPVTR_LEN 0x10 + +MLXSW_REG_DEFINE(spvtr, MLXSW_REG_SPVTR_ID, MLXSW_REG_SPVTR_LEN); + +/* reg_spvtr_tport + * Port is tunnel port. + * Access: Index + * + * Note: Reserved when SwitchX/-2 or Spectrum-1. + */ +MLXSW_ITEM32(reg, spvtr, tport, 0x00, 24, 1); + +/* reg_spvtr_local_port + * When tport = 0: local port number (Not supported from/to CPU). + * When tport = 1: tunnel port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvtr, local_port, 0x00, 16, 8); + +/* reg_spvtr_ippe + * Ingress Port Prio Mode Update Enable. + * When set, the Port Prio Mode is updated with the provided ipprio_mode field. + * Reserved on Get operations. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, ippe, 0x04, 31, 1); + +/* reg_spvtr_ipve + * Ingress Port VID Mode Update Enable. + * When set, the Ingress Port VID Mode is updated with the provided ipvid_mode + * field. + * Reserved on Get operations. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, ipve, 0x04, 30, 1); + +/* reg_spvtr_epve + * Egress Port VID Mode Update Enable. + * When set, the Egress Port VID Mode is updated with the provided epvid_mode + * field. + * Access: OP + */ +MLXSW_ITEM32(reg, spvtr, epve, 0x04, 29, 1); + +/* reg_spvtr_ipprio_mode + * Ingress Port Priority Mode. + * This controls the PCP and DEI of the new outer VLAN. + * Note: for SwitchX/-2 the DEI is not affected. + * 0: use port default PCP and DEI (configured by QPDPC). + * 1: use C-VLAN PCP and DEI. + * Has no effect when ipvid_mode = 0. + * Reserved when tport = 1. + * Access: RW + */ +MLXSW_ITEM32(reg, spvtr, ipprio_mode, 0x04, 20, 4); + +enum mlxsw_reg_spvtr_ipvid_mode { + /* IEEE Compliant PVID (default) */ + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID, + /* Push VLAN (for VLAN stacking, except prio tagged packets) */ + MLXSW_REG_SPVTR_IPVID_MODE_PUSH_VLAN_FOR_UNTAGGED_PACKET, + /* Always push VLAN (also for prio tagged packets) */ + MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN, +}; + +/* reg_spvtr_ipvid_mode + * Ingress Port VLAN-ID Mode. + * For Spectrum family, this affects the values of SPVM.i + * Access: RW + */ +MLXSW_ITEM32(reg, spvtr, ipvid_mode, 0x04, 16, 4); + +enum mlxsw_reg_spvtr_epvid_mode { + /* IEEE Compliant VLAN membership */ + MLXSW_REG_SPVTR_EPVID_MODE_IEEE_COMPLIANT_VLAN_MEMBERSHIP, + /* Pop VLAN (for VLAN stacking) */ + MLXSW_REG_SPVTR_EPVID_MODE_POP_VLAN, +}; + +/* reg_spvtr_epvid_mode + * Egress Port VLAN-ID Mode. + * For Spectrum family, this affects the values of SPVM.e,u,pt. + * Access: WO + */ +MLXSW_ITEM32(reg, spvtr, epvid_mode, 0x04, 0, 4); + +static inline void mlxsw_reg_spvtr_pack(char *payload, bool tport, + u8 local_port, + enum mlxsw_reg_spvtr_ipvid_mode ipvid_mode) +{ + MLXSW_REG_ZERO(spvtr, payload); + mlxsw_reg_spvtr_tport_set(payload, tport); + mlxsw_reg_spvtr_local_port_set(payload, local_port); + mlxsw_reg_spvtr_ipvid_mode_set(payload, ipvid_mode); + mlxsw_reg_spvtr_ipve_set(payload, true); +} +
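+/* Editor's aside -- sketch of putting the NVE tunnel port into push-VLAN
+ * mode with the helper above (hypothetical call site; the same pattern
+ * appears in the spectrum_nve_vxlan.c hunk further below):
+ *
+ *	char spvtr_pl[MLXSW_REG_SPVTR_LEN];
+ *
+ *	mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE,
+ *			     MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl);
+ */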
/* SVPE - Switch Virtual-Port Enabling Register * -------------------------------------------- * Enables port virtualization. @@ -1857,6 +1981,104 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* SPVC - Switch Port VLAN Classification Register + * ----------------------------------------------- + * Configures the port to identify packets as untagged / single tagged / + * double tagged packets based on the packet EtherTypes. + * Ethertype IDs are configured by SVER. + */ +#define MLXSW_REG_SPVC_ID 0x2026 +#define MLXSW_REG_SPVC_LEN 0x0C + +MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN); + +/* reg_spvc_local_port + * Local port. + * Access: Index + * + * Note: applies both to Rx port and Tx port, so if a packet traverses + * through Rx port i and a Tx port j then port i and port j must have the + * same configuration. + */ +MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8); + +/* reg_spvc_inner_et2 + * Vlan Tag1 EtherType2 enable. + * Packet is initially classified as double VLAN Tag if in addition to + * being classified with a tag0 VLAN Tag its tag1 EtherType value is + * equal to ether_type2. + * 0: disable (default) + * 1: enable + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, inner_et2, 0x08, 17, 1); + +/* reg_spvc_et2 + * Vlan Tag0 EtherType2 enable. + * Packet is initially classified as VLAN Tag if its tag0 EtherType is + * equal to ether_type2. + * 0: disable (default) + * 1: enable + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, et2, 0x08, 16, 1); + +/* reg_spvc_inner_et1 + * Vlan Tag1 EtherType1 enable. + * Packet is initially classified as double VLAN Tag if in addition to + * being classified with a tag0 VLAN Tag its tag1 EtherType value is + * equal to ether_type1. + * 0: disable + * 1: enable (default) + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, inner_et1, 0x08, 9, 1); + +/* reg_spvc_et1 + * Vlan Tag0 EtherType1 enable. + * Packet is initially classified as VLAN Tag if its tag0 EtherType is + * equal to ether_type1. + * 0: disable + * 1: enable (default) + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, et1, 0x08, 8, 1); + +/* reg_spvc_inner_et0 + * Vlan Tag1 EtherType0 enable. + * Packet is initially classified as double VLAN Tag if in addition to + * being classified with a tag0 VLAN Tag its tag1 EtherType value is + * equal to ether_type0. + * 0: disable + * 1: enable (default) + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1); + +/* reg_spvc_et0 + * Vlan Tag0 EtherType0 enable. + * Packet is initially classified as VLAN Tag if its tag0 EtherType is + * equal to ether_type0. + * 0: disable + * 1: enable (default) + * Access: RW + */ +MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1); + +static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1, + bool et0) +{ + MLXSW_REG_ZERO(spvc, payload); + mlxsw_reg_spvc_local_port_set(payload, local_port); + /* Enable inner_et1 and inner_et0 to enable identification of double + * tagged packets. + */ + mlxsw_reg_spvc_inner_et1_set(payload, 1); + mlxsw_reg_spvc_inner_et0_set(payload, 1); + mlxsw_reg_spvc_et1_set(payload, et1); + mlxsw_reg_spvc_et0_set(payload, et0); +} +
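+/* Editor's aside -- usage sketch (hypothetical caller): packing with
+ * et1 = false and et0 = true makes the port treat only 802.1q frames as
+ * tagged, which is exactly how the spectrum.c hunk below drives it via
+ * mlxsw_sp_port_vlan_classification_set():
+ *
+ *	char spvc_pl[MLXSW_REG_SPVC_LEN];
+ *
+ *	mlxsw_reg_spvc_pack(spvc_pl, local_port, false, true);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
+ */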
/* CWTP - Congestion WRED ECN TClass Profile * ---------------------------------------- * Configures the profiles for queues of egress port and traffic class @@ -7279,10 +7501,11 @@ static inline void mlxsw_reg_ralue_pack4(char *payload, enum mlxsw_reg_ralxx_protocol protocol, enum mlxsw_reg_ralue_op op, u16 virtual_router, u8 prefix_len, - u32 dip) + u32 *dip) { mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); - mlxsw_reg_ralue_dip4_set(payload, dip); + if (dip) + mlxsw_reg_ralue_dip4_set(payload, *dip); } static inline void mlxsw_reg_ralue_pack6(char *payload, @@ -7292,7 +7515,8 @@ static inline void mlxsw_reg_ralue_pack6(char *payload, const void *dip) { mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); - mlxsw_reg_ralue_dip6_memcpy_to(payload, dip); + if (dip) + mlxsw_reg_ralue_dip6_memcpy_to(payload, dip); } static inline void @@ -8245,6 +8469,658 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router, mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask); } +/* RXLTE - Router XLT Enable Register + * ---------------------------------- + * The RXLTE enables XLT (eXtended Lookup Table) LPM lookups if a capable + * XM is present on the system. + */ + +#define MLXSW_REG_RXLTE_ID 0x8050 +#define MLXSW_REG_RXLTE_LEN 0x0C + +MLXSW_REG_DEFINE(rxlte, MLXSW_REG_RXLTE_ID, MLXSW_REG_RXLTE_LEN); + +/* reg_rxlte_virtual_router + * Virtual router ID associated with the router interface. + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, rxlte, virtual_router, 0x00, 0, 16); + +enum mlxsw_reg_rxlte_protocol { + MLXSW_REG_RXLTE_PROTOCOL_IPV4, + MLXSW_REG_RXLTE_PROTOCOL_IPV6, +}; + +/* reg_rxlte_protocol + * Access: Index + */ +MLXSW_ITEM32(reg, rxlte, protocol, 0x04, 0, 4); + +/* reg_rxlte_lpm_xlt_en + * Access: RW + */ +MLXSW_ITEM32(reg, rxlte, lpm_xlt_en, 0x08, 0, 1); + +static inline void mlxsw_reg_rxlte_pack(char *payload, u16 virtual_router, + enum mlxsw_reg_rxlte_protocol protocol, + bool lpm_xlt_en) +{ + MLXSW_REG_ZERO(rxlte, payload); + mlxsw_reg_rxlte_virtual_router_set(payload, virtual_router); + mlxsw_reg_rxlte_protocol_set(payload, protocol); + mlxsw_reg_rxlte_lpm_xlt_en_set(payload, lpm_xlt_en); +} + +/* RXLTM - Router XLT M select Register + * ------------------------------------ + * The RXLTM configures and selects the M for the XM lookups. + */ + +#define MLXSW_REG_RXLTM_ID 0x8051 +#define MLXSW_REG_RXLTM_LEN 0x14 + +MLXSW_REG_DEFINE(rxltm, MLXSW_REG_RXLTM_ID, MLXSW_REG_RXLTM_LEN); + +/* reg_rxltm_m0_val_v6 + * Global M0 value for IPv6. + * Range 0..128 + * Access: RW + */ +MLXSW_ITEM32(reg, rxltm, m0_val_v6, 0x10, 16, 8); + +/* reg_rxltm_m0_val_v4 + * Global M0 value for IPv4. + * Range 0..32 + * Access: RW + */ +MLXSW_ITEM32(reg, rxltm, m0_val_v4, 0x10, 0, 6); + +static inline void mlxsw_reg_rxltm_pack(char *payload, u8 m0_val_v4, u8 m0_val_v6) +{ + MLXSW_REG_ZERO(rxltm, payload); + mlxsw_reg_rxltm_m0_val_v6_set(payload, m0_val_v6); + mlxsw_reg_rxltm_m0_val_v4_set(payload, m0_val_v4); +} +
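+/* Editor's aside -- a hypothetical enable sequence for the XLT lookup path
+ * (VR 0, IPv4; the M0 values 16 and 32 are illustrative, not taken from
+ * this patch, and must respect the 0..32 / 0..128 ranges noted above):
+ *
+ *	char rxlte_pl[MLXSW_REG_RXLTE_LEN];
+ *	char rxltm_pl[MLXSW_REG_RXLTM_LEN];
+ *
+ *	mlxsw_reg_rxlte_pack(rxlte_pl, 0, MLXSW_REG_RXLTE_PROTOCOL_IPV4, true);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl);
+ *	mlxsw_reg_rxltm_pack(rxltm_pl, 16, 32);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl);
+ */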
+/* RLCMLD - Router LPM Cache ML Delete Register + * -------------------------------------------- + * The RLCMLD register is used to bulk delete the XLT-LPM cache ML entries. + * This can be used by SW when L is increased or decreased, and thus entries + * with old ML values need to be removed. + */ + +#define MLXSW_REG_RLCMLD_ID 0x8055 +#define MLXSW_REG_RLCMLD_LEN 0x30 + +MLXSW_REG_DEFINE(rlcmld, MLXSW_REG_RLCMLD_ID, MLXSW_REG_RLCMLD_LEN); + +enum mlxsw_reg_rlcmld_select { + MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES, + MLXSW_REG_RLCMLD_SELECT_M_ENTRIES, + MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES, +}; + +/* reg_rlcmld_select + * Which entries to delete. + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, select, 0x00, 16, 2); + +enum mlxsw_reg_rlcmld_filter_fields { + MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL = 0x04, + MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER = 0x08, + MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP = 0x10, +}; + +/* reg_rlcmld_filter_fields + * If a bit is '0' then the relevant field is ignored. + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, filter_fields, 0x00, 0, 8); + +enum mlxsw_reg_rlcmld_protocol { + MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4, + MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6, +}; + +/* reg_rlcmld_protocol + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, protocol, 0x08, 0, 4); + +/* reg_rlcmld_virtual_router + * Virtual router ID. + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, virtual_router, 0x0C, 0, 16); + +/* reg_rlcmld_dip + * The prefix of the route or of the marker that the object of the LPM + * is compared with. The most significant bits of the dip are the prefix. + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, dip4, 0x1C, 0, 32); +MLXSW_ITEM_BUF(reg, rlcmld, dip6, 0x10, 16); + +/* reg_rlcmld_dip_mask + * per bit: + * 0: no match + * 1: match + * Access: Index + */ +MLXSW_ITEM32(reg, rlcmld, dip_mask4, 0x2C, 0, 32); +MLXSW_ITEM_BUF(reg, rlcmld, dip_mask6, 0x20, 16); + +static inline void __mlxsw_reg_rlcmld_pack(char *payload, + enum mlxsw_reg_rlcmld_select select, + enum mlxsw_reg_rlcmld_protocol protocol, + u16 virtual_router) +{ + u8 filter_fields = MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_PROTOCOL | + MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_VIRTUAL_ROUTER | + MLXSW_REG_RLCMLD_FILTER_FIELDS_BY_DIP; + + MLXSW_REG_ZERO(rlcmld, payload); + mlxsw_reg_rlcmld_select_set(payload, select); + mlxsw_reg_rlcmld_filter_fields_set(payload, filter_fields); + mlxsw_reg_rlcmld_protocol_set(payload, protocol); + mlxsw_reg_rlcmld_virtual_router_set(payload, virtual_router); +} + +static inline void mlxsw_reg_rlcmld_pack4(char *payload, + enum mlxsw_reg_rlcmld_select select, + u16 virtual_router, + u32 dip, u32 dip_mask) +{ + __mlxsw_reg_rlcmld_pack(payload, select, + MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV4, + virtual_router); + mlxsw_reg_rlcmld_dip4_set(payload, dip); + mlxsw_reg_rlcmld_dip_mask4_set(payload, dip_mask); +} + +static inline void mlxsw_reg_rlcmld_pack6(char *payload, + enum mlxsw_reg_rlcmld_select select, + u16 virtual_router, + const void *dip, const void *dip_mask) +{ + __mlxsw_reg_rlcmld_pack(payload, select, + MLXSW_REG_RLCMLD_PROTOCOL_UC_IPV6, + virtual_router); + mlxsw_reg_rlcmld_dip6_memcpy_to(payload, dip); + mlxsw_reg_rlcmld_dip_mask6_memcpy_to(payload, dip_mask); +} +
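+/* Editor's aside -- worked example with hypothetical values: deleting the
+ * cached ML entries covered by 10.0.0.0/8 in VR 0. The prefix sits in the
+ * most significant bits of the dip and the mask selects exactly those bits:
+ *
+ *	char rlcmld_pl[MLXSW_REG_RLCMLD_LEN];
+ *
+ *	mlxsw_reg_rlcmld_pack4(rlcmld_pl, MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES,
+ *			       0, 0x0A000000, 0xFF000000);
+ *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl);
+ */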
+/* RLPMCE - Router LPM Cache Enable Register + * ----------------------------------------- + * Allows disabling the LPM cache. Can be changed on the fly. + */ + +#define MLXSW_REG_RLPMCE_ID 0x8056 +#define MLXSW_REG_RLPMCE_LEN 0x4 + +MLXSW_REG_DEFINE(rlpmce, MLXSW_REG_RLPMCE_ID, MLXSW_REG_RLPMCE_LEN); + +/* reg_rlpmce_flush + * Flush: + * 0: do not flush the cache (default) + * 1: flush (clear) the cache + * Access: WO + */ +MLXSW_ITEM32(reg, rlpmce, flush, 0x00, 4, 1); + +/* reg_rlpmce_disable + * LPM cache: + * 0: enabled (default) + * 1: disabled + * Access: RW + */ +MLXSW_ITEM32(reg, rlpmce, disable, 0x00, 0, 1); + +static inline void mlxsw_reg_rlpmce_pack(char *payload, bool flush, + bool disable) +{ + MLXSW_REG_ZERO(rlpmce, payload); + mlxsw_reg_rlpmce_flush_set(payload, flush); + mlxsw_reg_rlpmce_disable_set(payload, disable); +} + +/* Note that the XLTQ, XMDR, XRMT and XRALXX register positions violate the + * rule of ordering register definitions by ID. However, the XRALXX pack + * helpers use the RALXX pack helpers, and the RALXX registers have higher + * IDs. Also, XMDR uses the RALUE enums. XLTQ and XRMT are simply put + * alongside the related registers. + */ + +/* XLTQ - XM Lookup Table Query Register + * ------------------------------------- + */ +#define MLXSW_REG_XLTQ_ID 0x7802 +#define MLXSW_REG_XLTQ_LEN 0x2C + +MLXSW_REG_DEFINE(xltq, MLXSW_REG_XLTQ_ID, MLXSW_REG_XLTQ_LEN); + +enum mlxsw_reg_xltq_xm_device_id { + MLXSW_REG_XLTQ_XM_DEVICE_ID_UNKNOWN, + MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT = 0xCF71, +}; + +/* reg_xltq_xm_device_id + * XM device ID. + * Access: RO + */ +MLXSW_ITEM32(reg, xltq, xm_device_id, 0x04, 0, 16); + +/* reg_xltq_xlt_cap_ipv4_lpm + * Access: RO + */ +MLXSW_ITEM32(reg, xltq, xlt_cap_ipv4_lpm, 0x10, 0, 1); + +/* reg_xltq_xlt_cap_ipv6_lpm + * Access: RO + */ +MLXSW_ITEM32(reg, xltq, xlt_cap_ipv6_lpm, 0x10, 1, 1); + +/* reg_xltq_cap_xlt_entries + * Number of XLT entries + * Note: SW must not fill more than 80% in order to avoid overflow + * Access: RO + */ +MLXSW_ITEM32(reg, xltq, cap_xlt_entries, 0x20, 0, 32); + +/* reg_xltq_cap_xlt_mtable + * XLT M-Table max size + * Access: RO + */ +MLXSW_ITEM32(reg, xltq, cap_xlt_mtable, 0x24, 0, 32); + +static inline void mlxsw_reg_xltq_pack(char *payload) +{ + MLXSW_REG_ZERO(xltq, payload); +} + +static inline void mlxsw_reg_xltq_unpack(char *payload, u16 *xm_device_id, bool *xlt_cap_ipv4_lpm, + bool *xlt_cap_ipv6_lpm, u32 *cap_xlt_entries, + u32 *cap_xlt_mtable) +{ + *xm_device_id = mlxsw_reg_xltq_xm_device_id_get(payload); + *xlt_cap_ipv4_lpm = mlxsw_reg_xltq_xlt_cap_ipv4_lpm_get(payload); + *xlt_cap_ipv6_lpm = mlxsw_reg_xltq_xlt_cap_ipv6_lpm_get(payload); + *cap_xlt_entries = mlxsw_reg_xltq_cap_xlt_entries_get(payload); + *cap_xlt_mtable = mlxsw_reg_xltq_cap_xlt_mtable_get(payload); +} + +/* XMDR - XM Direct Register + * ------------------------- + * The XMDR allows direct access to the XM device via the switch. + * It works in synchronous mode: FW waits for a response from the XLT for + * each command and acks the XMDR accordingly. + */ +#define MLXSW_REG_XMDR_ID 0x7803 +#define MLXSW_REG_XMDR_BASE_LEN 0x20 +#define MLXSW_REG_XMDR_TRANS_LEN 0x80 +#define MLXSW_REG_XMDR_LEN (MLXSW_REG_XMDR_BASE_LEN + \ + MLXSW_REG_XMDR_TRANS_LEN) + +MLXSW_REG_DEFINE(xmdr, MLXSW_REG_XMDR_ID, MLXSW_REG_XMDR_LEN); + +/* reg_xmdr_bulk_entry + * Bulk_entry + * 0: Last entry - immediate flush of XRT-cache + * 1: Bulk entry - do not flush the XRT-cache + * Access: OP + */ +MLXSW_ITEM32(reg, xmdr, bulk_entry, 0x04, 8, 1); + +/* reg_xmdr_num_rec + * Number of records for Direct access to XM + * Supported: 0..4 commands (except NOP which is a filler) + * 0 commands is reserved when bulk_entry = 1. + * 0 commands is allowed when bulk_entry = 0 for immediate XRT-cache flush. + * Access: OP + */ +MLXSW_ITEM32(reg, xmdr, num_rec, 0x04, 0, 4); + +/* reg_xmdr_reply_vect + * Reply Vector + * Bit i for command index i+1 + * values per bit: + * 0: failed + * 1: succeeded + * e.g. if commands 1, 2, 4 succeeded and command 3 failed then binary + * value will be 0b1011 + * Access: RO + */ +MLXSW_ITEM_BIT_ARRAY(reg, xmdr, reply_vect, 0x08, 4, 1); + +static inline void mlxsw_reg_xmdr_pack(char *payload, bool bulk_entry) +{ + MLXSW_REG_ZERO(xmdr, payload); + mlxsw_reg_xmdr_bulk_entry_set(payload, bulk_entry); +} + +enum mlxsw_reg_xmdr_c_cmd_id { + MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4 = 0x30, + MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6 = 0x31, +}; + +#define MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN 32 +#define MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN 48 + +/* reg_xmdr_c_cmd_id + */ +MLXSW_ITEM32(reg, xmdr_c, cmd_id, 0x00, 24, 8); + +/* reg_xmdr_c_seq_number + */ +MLXSW_ITEM32(reg, xmdr_c, seq_number, 0x00, 12, 12); + +enum mlxsw_reg_xmdr_c_ltr_op { + /* Activity is set */ + MLXSW_REG_XMDR_C_LTR_OP_WRITE = 0, + /* There is no update mask. All fields are updated. */ + MLXSW_REG_XMDR_C_LTR_OP_UPDATE = 1, + MLXSW_REG_XMDR_C_LTR_OP_DELETE = 2, +}; + +/* reg_xmdr_c_ltr_op + * Operation. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_op, 0x04, 24, 8); + +/* reg_xmdr_c_ltr_trap_action + * Trap action. + * Values are defined in enum mlxsw_reg_ralue_trap_action.
+ */ +MLXSW_ITEM32(reg, xmdr_c, ltr_trap_action, 0x04, 20, 4); + +enum mlxsw_reg_xmdr_c_ltr_trap_id_num { + MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS0, + MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS1, + MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS2, + MLXSW_REG_XMDR_C_LTR_TRAP_ID_NUM_RTR_INGRESS3, +}; + +/* reg_xmdr_c_ltr_trap_id_num + * Trap-ID number. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_trap_id_num, 0x04, 16, 4); + +/* reg_xmdr_c_ltr_virtual_router + * Virtual Router ID. + * Range is 0..cap_max_virtual_routers-1 + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_virtual_router, 0x04, 0, 16); + +/* reg_xmdr_c_ltr_prefix_len + * Number of bits in the prefix of the LPM route. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_prefix_len, 0x08, 24, 8); + +/* reg_xmdr_c_ltr_bmp_len + * The best match prefix length in the case that there is no match for + * longer prefixes. + * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_bmp_len, 0x08, 16, 8); + +/* reg_xmdr_c_ltr_entry_type + * Entry type. + * Values are defined in enum mlxsw_reg_ralue_entry_type. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_entry_type, 0x08, 4, 4); + +enum mlxsw_reg_xmdr_c_ltr_action_type { + MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL, + MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE, + MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME, +}; + +/* reg_xmdr_c_ltr_action_type + * Action Type. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_action_type, 0x08, 0, 4); + +/* reg_xmdr_c_ltr_erif + * Egress Router Interface. + * Only relevant in case of LOCAL action. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_erif, 0x10, 0, 16); + +/* reg_xmdr_c_ltr_adjacency_index + * Points to the first entry of the group-based ECMP. + * Only relevant in case of REMOTE action. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_adjacency_index, 0x10, 0, 24); + +#define MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC 0xFFFFFF + +/* reg_xmdr_c_ltr_pointer_to_tunnel + * Only relevant in case of IP2ME action. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_pointer_to_tunnel, 0x10, 0, 24); + +/* reg_xmdr_c_ltr_ecmp_size + * Amount of sequential entries starting + * from the adjacency_index (the number of ECMPs). + * The valid range is 1-64, 512, 1024, 2048 and 4096. + * Only relevant in case of REMOTE action. + */ +MLXSW_ITEM32(reg, xmdr_c, ltr_ecmp_size, 0x14, 0, 32); + +/* reg_xmdr_c_ltr_dip* + * The prefix of the route or of the marker that the object of the LPM + * is compared with. The most significant bits of the dip are the prefix. + * The least significant bits must be '0' if the prefix_len is smaller + * than 128 for IPv6 or smaller than 32 for IPv4. 
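+ * For example (editor's illustration): an IPv4 /24 route for 192.0.2.0 is
+ * encoded as ltr_dip4 = 0xC0000200 with prefix_len = 24, leaving bits 7:0
+ * zero as required above.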
+ */ +MLXSW_ITEM32(reg, xmdr_c, ltr_dip4, 0x1C, 0, 32); +MLXSW_ITEM_BUF(reg, xmdr_c, ltr_dip6, 0x1C, 16); + +static inline void +mlxsw_reg_xmdr_c_ltr_pack(char *xmdr_payload, unsigned int trans_offset, + enum mlxsw_reg_xmdr_c_cmd_id cmd_id, u16 seq_number, + enum mlxsw_reg_xmdr_c_ltr_op op, u16 virtual_router, + u8 prefix_len) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + u8 num_rec = mlxsw_reg_xmdr_num_rec_get(xmdr_payload); + + mlxsw_reg_xmdr_num_rec_set(xmdr_payload, num_rec + 1); + + mlxsw_reg_xmdr_c_cmd_id_set(payload, cmd_id); + mlxsw_reg_xmdr_c_seq_number_set(payload, seq_number); + mlxsw_reg_xmdr_c_ltr_op_set(payload, op); + mlxsw_reg_xmdr_c_ltr_virtual_router_set(payload, virtual_router); + mlxsw_reg_xmdr_c_ltr_prefix_len_set(payload, prefix_len); + mlxsw_reg_xmdr_c_ltr_entry_type_set(payload, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY); + mlxsw_reg_xmdr_c_ltr_bmp_len_set(payload, prefix_len); +} + +static inline unsigned int +mlxsw_reg_xmdr_c_ltr_pack4(char *xmdr_payload, unsigned int trans_offset, + u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op, + u16 virtual_router, u8 prefix_len, u32 *dip) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset, + MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V4, + seq_number, op, virtual_router, prefix_len); + if (dip) + mlxsw_reg_xmdr_c_ltr_dip4_set(payload, *dip); + return MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN; +} + +static inline unsigned int +mlxsw_reg_xmdr_c_ltr_pack6(char *xmdr_payload, unsigned int trans_offset, + u16 seq_number, enum mlxsw_reg_xmdr_c_ltr_op op, + u16 virtual_router, u8 prefix_len, const void *dip) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_pack(xmdr_payload, trans_offset, + MLXSW_REG_XMDR_C_CMD_ID_LT_ROUTE_V6, + seq_number, op, virtual_router, prefix_len); + if (dip) + mlxsw_reg_xmdr_c_ltr_dip6_memcpy_to(payload, dip); + return MLXSW_REG_XMDR_C_LT_ROUTE_V6_LEN; +} + +static inline void +mlxsw_reg_xmdr_c_ltr_act_remote_pack(char *xmdr_payload, unsigned int trans_offset, + enum mlxsw_reg_ralue_trap_action trap_action, + enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, + u32 adjacency_index, u16 ecmp_size) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_REMOTE); + mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action); + mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num); + mlxsw_reg_xmdr_c_ltr_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_xmdr_c_ltr_ecmp_size_set(payload, ecmp_size); +} + +static inline void +mlxsw_reg_xmdr_c_ltr_act_local_pack(char *xmdr_payload, unsigned int trans_offset, + enum mlxsw_reg_ralue_trap_action trap_action, + enum mlxsw_reg_xmdr_c_ltr_trap_id_num trap_id_num, u16 erif) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_LOCAL); + mlxsw_reg_xmdr_c_ltr_trap_action_set(payload, trap_action); + mlxsw_reg_xmdr_c_ltr_trap_id_num_set(payload, trap_id_num); + mlxsw_reg_xmdr_c_ltr_erif_set(payload, erif); +} + +static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(char *xmdr_payload, + unsigned int trans_offset) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME); + 
mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, + MLXSW_REG_XMDR_C_LTR_POINTER_TO_TUNNEL_DISABLED_MAGIC); +} + +static inline void mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(char *xmdr_payload, + unsigned int trans_offset, + u32 pointer_to_tunnel) +{ + char *payload = xmdr_payload + MLXSW_REG_XMDR_BASE_LEN + trans_offset; + + mlxsw_reg_xmdr_c_ltr_action_type_set(payload, MLXSW_REG_XMDR_C_LTR_ACTION_TYPE_IP2ME); + mlxsw_reg_xmdr_c_ltr_pointer_to_tunnel_set(payload, pointer_to_tunnel); +} + +/* XRMT - XM Router M Table Register + * --------------------------------- + * The XRMT configures the M-Table for the XLT-LPM. + */ +#define MLXSW_REG_XRMT_ID 0x7810 +#define MLXSW_REG_XRMT_LEN 0x14 + +MLXSW_REG_DEFINE(xrmt, MLXSW_REG_XRMT_ID, MLXSW_REG_XRMT_LEN); + +/* reg_xrmt_index + * Index in M-Table. + * Range 0..cap_xlt_mtable-1 + * Access: Index + */ +MLXSW_ITEM32(reg, xrmt, index, 0x04, 0, 20); + +/* reg_xrmt_l0_val + * Access: RW + */ +MLXSW_ITEM32(reg, xrmt, l0_val, 0x10, 24, 8); + +static inline void mlxsw_reg_xrmt_pack(char *payload, u32 index, u8 l0_val) +{ + MLXSW_REG_ZERO(xrmt, payload); + mlxsw_reg_xrmt_index_set(payload, index); + mlxsw_reg_xrmt_l0_val_set(payload, l0_val); +} + +/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register + * ----------------------------------------------------------- + * The XRALTA is used to allocate the XLT LPM trees. + * + * This register embeds original RALTA register. + */ +#define MLXSW_REG_XRALTA_ID 0x7811 +#define MLXSW_REG_XRALTA_LEN 0x08 +#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04 + +MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN); + +static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) +{ + char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET; + + MLXSW_REG_ZERO(xralta, payload); + mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id); +} + +/* XRALST - XM Router Algorithmic LPM Structure Tree Register + * ---------------------------------------------------------- + * The XRALST is used to set and query the structure of an XLT LPM tree. + * + * This register embeds original RALST register. + */ +#define MLXSW_REG_XRALST_ID 0x7812 +#define MLXSW_REG_XRALST_LEN 0x108 +#define MLXSW_REG_XRALST_RALST_OFFSET 0x04 + +MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN); + +static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id) +{ + char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET; + + MLXSW_REG_ZERO(xralst, payload); + mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id); +} + +static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number, + u8 left_child_bin, + u8 right_child_bin) +{ + char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET; + + mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin, + right_child_bin); +} + +/* XRALTB - XM Router Algorithmic LPM Tree Binding Register + * -------------------------------------------------------- + * The XRALTB register is used to bind virtual router and protocol + * to an allocated LPM tree. + * + * This register embeds original RALTB register. 
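+ *
+ * Editor's sketch (hypothetical values): binding IPv4 in virtual router 0
+ * to tree 1 reuses the embedded RALTB pack helper, before being handed to
+ * the router's raltb_write low-level op (see spectrum_router.c below):
+ *
+ *	char xraltb_pl[MLXSW_REG_XRALTB_LEN];
+ *
+ *	mlxsw_reg_xraltb_pack(xraltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 1);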
+ */ +#define MLXSW_REG_XRALTB_ID 0x7813 +#define MLXSW_REG_XRALTB_LEN 0x08 +#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04 + +MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN); + +static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) +{ + char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET; + + MLXSW_REG_ZERO(xraltb, payload); + mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -10314,13 +11190,6 @@ enum mlxsw_reg_tnumt_record_type { */ MLXSW_ITEM32(reg, tnumt, record_type, 0x00, 28, 4); -enum mlxsw_reg_tnumt_tunnel_port { - MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, - MLXSW_REG_TNUMT_TUNNEL_PORT_VPLS, - MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_TNUMT_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_tnumt_tunnel_port * Tunnel port. * Access: RW @@ -10368,7 +11237,7 @@ MLXSW_ITEM32_INDEXED(reg, tnumt, udip_ptr, 0x0C, 0, 24, 0x04, 0x00, false); static inline void mlxsw_reg_tnumt_pack(char *payload, enum mlxsw_reg_tnumt_record_type type, - enum mlxsw_reg_tnumt_tunnel_port tport, + enum mlxsw_reg_tunnel_port tport, u32 underlay_mc_ptr, bool vnext, u32 next_underlay_mc_ptr, u8 record_size) @@ -10532,13 +11401,6 @@ static inline void mlxsw_reg_tndem_pack(char *payload, u8 underlay_ecn, MLXSW_REG_DEFINE(tnpc, MLXSW_REG_TNPC_ID, MLXSW_REG_TNPC_LEN); -enum mlxsw_reg_tnpc_tunnel_port { - MLXSW_REG_TNPC_TUNNEL_PORT_NVE, - MLXSW_REG_TNPC_TUNNEL_PORT_VPLS, - MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL0, - MLXSW_REG_TNPC_TUNNEL_FLEX_TUNNEL1, -}; - /* reg_tnpc_tunnel_port * Tunnel port. * Access: Index @@ -10558,7 +11420,7 @@ MLXSW_ITEM32(reg, tnpc, learn_enable_v6, 0x04, 1, 1); MLXSW_ITEM32(reg, tnpc, learn_enable_v4, 0x04, 0, 1); static inline void mlxsw_reg_tnpc_pack(char *payload, - enum mlxsw_reg_tnpc_tunnel_port tport, + enum mlxsw_reg_tunnel_port tport, bool learn_enable) { MLXSW_REG_ZERO(tnpc, payload); @@ -11127,9 +11989,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(slcor), MLXSW_REG(spmlr), MLXSW_REG(svfa), + MLXSW_REG(spvtr), MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(spvc), MLXSW_REG(cwtp), MLXSW_REG(cwtpm), MLXSW_REG(pgcr), @@ -11195,6 +12059,16 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rigr2), MLXSW_REG(recr2), MLXSW_REG(rmft2), + MLXSW_REG(rxlte), + MLXSW_REG(rxltm), + MLXSW_REG(rlcmld), + MLXSW_REG(rlpmce), + MLXSW_REG(xltq), + MLXSW_REG(xmdr), + MLXSW_REG(xrmt), + MLXSW_REG(xralta), + MLXSW_REG(xralst), + MLXSW_REG(xraltb), MLXSW_REG(mfcr), MLXSW_REG(mfsc), MLXSW_REG(mfsm), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b08853f71b2b..1650d9852b5b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -45,7 +45,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 1310 +#define MLXSW_SP1_FWREV_SUBMINOR 2018 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -62,7 +62,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 1310 +#define MLXSW_SP2_FWREV_SUBMINOR 2018 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = 
MLXSW_SP2_FWREV_MAJOR, @@ -77,7 +77,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 #define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 1310 +#define MLXSW_SP3_FWREV_SUBMINOR 2018 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -384,13 +384,37 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, return err; } +int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type) +{ + switch (ethtype) { + case ETH_P_8021Q: + *p_sver_type = 0; + break; + case ETH_P_8021AD: + *p_sver_type = 1; + break; + default: + return -EINVAL; + } + + return 0; +} + static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid) + u16 vid, u16 ethtype) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char spvid_pl[MLXSW_REG_SPVID_LEN]; + u8 sver_type; + int err; + + err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type); + if (err) + return err; + + mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid, + sver_type); - mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); } @@ -404,7 +428,8 @@ static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl); } -int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, + u16 ethtype) { int err; @@ -413,7 +438,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) if (err) return err; } else { - err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid); + err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype); if (err) return err; err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true); @@ -425,7 +450,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) return 0; err_port_allow_untagged_set: - __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid); + __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype); return err; } @@ -1386,6 +1411,19 @@ static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_po return 0; } +int +mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool is_8021ad_tagged, + bool is_8021q_tagged) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spvc_pl[MLXSW_REG_SPVC_LEN]; + + mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, + is_8021ad_tagged, is_8021q_tagged); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 split_base_local_port, struct mlxsw_sp_port_mapping *port_mapping) @@ -1575,7 +1613,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_nve_init; } - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, + ETH_P_8021Q); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", mlxsw_sp_port->local_port); @@ -1592,6 +1631,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, } mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; + /* Set SPVC.et0=true and SPVC.et1=false to make the local port treat + * only packets with an 802.1q header as tagged packets.
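+ * (Editor's note: the error and removal paths below restore the default
+ * with mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true),
+ * i.e. both 802.1ad and 802.1q tagged frames are classified as tagged
+ * again.)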
+ */ + err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n", + local_port); + goto err_port_vlan_classification_set; + } + INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw, mlxsw_sp->ptp_ops->shaper_work); @@ -1618,6 +1667,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_register_netdev: err_port_overheat_init_val_set: + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); +err_port_vlan_classification_set: mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); err_port_vlan_create: @@ -1664,6 +1715,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_ptp_clear(mlxsw_sp_port); mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true); mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); mlxsw_sp_port_nve_fini(mlxsw_sp_port); @@ -1788,6 +1840,9 @@ static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp) return -ENOMEM; for (i = 1; i < max_ports; i++) { + if (mlxsw_core_port_is_xm(mlxsw_sp->core, i)) + continue; + err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &port_mapping); if (err) goto err_port_module_info_get; @@ -2881,6 +2936,8 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { .max_ib_mc = 0, .used_max_pkey = 1, .max_pkey = 0, + .used_kvh_xlt_cache_mode = 1, + .kvh_xlt_cache_mode = 1, .swid_config = { { .used_type = 1, @@ -3543,7 +3600,8 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) + struct net_device *lag_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_upper *lag; @@ -3579,8 +3637,20 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, if (mlxsw_sp_port->default_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); + /* Join a router interface configured on the LAG, if exists */ + err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan, + lag_dev, extack); + if (err) + goto err_router_join; + return 0; +err_router_join: + lag->ref_count--; + mlxsw_sp_port->lagged = 0; + mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, + mlxsw_sp_port->local_port); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -3618,7 +3688,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, lag->ref_count--; /* Make sure untagged frames are allowed to ingress */ - mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, + ETH_P_8021Q); } static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -3840,6 +3911,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; int err = 0; + u16 proto; mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -3897,6 +3969,36 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); return -EINVAL; } + if 
(netif_is_bridge_master(upper_dev)) { + br_vlan_get_proto(upper_dev, &proto); + if (br_vlan_enabled(upper_dev) && + proto != ETH_P_8021Q && proto != ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported"); + return -EOPNOTSUPP; + } + if (vlan_uses_dev(lower_dev) && + br_vlan_enabled(upper_dev) && + proto == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported"); + return -EOPNOTSUPP; + } + } + if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) { + struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev); + + if (br_vlan_enabled(br_dev)) { + br_vlan_get_proto(br_dev, &proto); + if (proto == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge"); + return -EOPNOTSUPP; + } + } + } + if (is_vlan_dev(upper_dev) && + ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { + NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); + return -EOPNOTSUPP; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -3913,7 +4015,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, } else if (netif_is_lag_master(upper_dev)) { if (info->linking) { err = mlxsw_sp_port_lag_join(mlxsw_sp_port, - upper_dev); + upper_dev, extack); } else { mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); mlxsw_sp_port_lag_leave(mlxsw_sp_port, @@ -4162,6 +4264,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, struct netdev_notifier_changeupper_info *info = ptr; struct netlink_ext_ack *extack; struct net_device *upper_dev; + u16 proto; if (!mlxsw_sp) return 0; @@ -4177,6 +4280,18 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, } if (!info->linking) break; + if (br_vlan_enabled(br_dev)) { + br_vlan_get_proto(br_dev, &proto); + if (proto == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge"); + return -EOPNOTSUPP; + } + } + if (is_vlan_dev(upper_dev) && + ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) { + NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol"); + return -EOPNOTSUPP; + } if (netif_is_macvlan(upper_dev) && !mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) { NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 74b3959b36d4..a6956cfc9cb1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -4,6 +4,7 @@ #ifndef _MLXSW_SPECTRUM_H #define _MLXSW_SPECTRUM_H +#include <linux/ethtool.h> #include <linux/types.h> #include <linux/netdevice.h> #include <linux/rhashtable.h> @@ -427,6 +428,10 @@ int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, int prio, char *ppcnt_pl); int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up); +int +mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool is_8021ad_tagged, + bool is_8021q_tagged); /* spectrum_buffers.c */ struct mlxsw_sp_hdroom_prio { @@ -579,7 +584,9 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, 
bool learn_enable); -int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type); +int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, + u16 ethtype); struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); @@ -650,6 +657,10 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, struct net_device *l3_dev, unsigned long event, struct netdev_notifier_info *info); +int +mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + struct net_device *l3_dev, + struct netlink_ext_ack *extack); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, @@ -1192,6 +1203,7 @@ struct mlxsw_sp_nve_params { enum mlxsw_sp_nve_type type; __be32 vni; const struct net_device *dev; + u16 ethertype; }; extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index daf029931b5f..ed81d4fa48ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -913,7 +913,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) if (mlxsw_sp_nexthop_offload(nh) && - !mlxsw_sp_nexthop_group_has_ipip(nh)) + !mlxsw_sp_nexthop_group_has_ipip(nh) && + !mlxsw_sp_nexthop_is_discard(nh)) size++; return size; } @@ -1105,7 +1106,8 @@ start_again: nh_count = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { if (!mlxsw_sp_nexthop_offload(nh) || - mlxsw_sp_nexthop_group_has_ipip(nh)) + mlxsw_sp_nexthop_group_has_ipip(nh) || + mlxsw_sp_nexthop_is_discard(nh)) continue; if (nh_count < nh_skip) @@ -1186,7 +1188,8 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { if (!mlxsw_sp_nexthop_offload(nh) || - mlxsw_sp_nexthop_group_has_ipip(nh)) + mlxsw_sp_nexthop_group_has_ipip(nh) || + mlxsw_sp_nexthop_is_discard(nh)) continue; mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index a8525992528f..6ccca39bae84 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -142,9 +142,9 @@ mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, } static int -mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, - u32 tunnel_index, - struct mlxsw_sp_ipip_entry *ipip_entry) +mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); @@ -180,41 +180,6 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); } -static int -mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp, - u32 dip, u8 prefix_len, u16 ul_vr_id, - enum mlxsw_reg_ralue_op op, - u32 tunnel_index) -{ - char ralue_pl[MLXSW_REG_RALUE_LEN]; - - mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op, - ul_vr_id, prefix_len, 
dip); - mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); -} - -static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_ipip_entry *ipip_entry, - enum mlxsw_reg_ralue_op op, - u32 tunnel_index) -{ - u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb); - __be32 dip; - int err; - - err = mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(mlxsw_sp, tunnel_index, - ipip_entry); - if (err) - return err; - - dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, - ipip_entry->ol_dev).addr4; - return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip), - 32, ul_vr_id, op, - tunnel_index); -} - static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { @@ -231,8 +196,7 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto, } static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *ol_dev, - enum mlxsw_sp_l3proto ol_proto) + const struct net_device *ol_dev) { struct ip_tunnel *tunnel = netdev_priv(ol_dev); __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ @@ -331,7 +295,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .dev_type = ARPHRD_IPGRE, .ul_proto = MLXSW_SP_L3_PROTO_IPV4, .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4, - .fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4, + .decap_config = mlxsw_sp_ipip_decap_config_gre4, .can_offload = mlxsw_sp_ipip_can_offload_gre4, .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index bb5c4d4a5872..87bef9880e5e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -43,17 +43,15 @@ struct mlxsw_sp_ipip_ops { struct mlxsw_sp_ipip_entry *ipip_entry); bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *ol_dev, - enum mlxsw_sp_l3proto ol_proto); + const struct net_device *ol_dev); /* Return a configuration for creating an overlay loopback RIF. 
*/ struct mlxsw_sp_rif_ipip_lb_config (*ol_loopback_config)(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev); - int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp, + int (*decap_config)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, - enum mlxsw_reg_ralue_op op, u32 tunnel_index); int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 47eb751a2570..7846a21555ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -15,7 +15,7 @@ struct mlxsw_sp_mr { struct list_head table_list; struct mutex table_list_lock; /* Protects table_list */ #define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */ - unsigned long priv[0]; + unsigned long priv[]; /* priv always has to be the last item */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index 54d3e7dcd303..e5ec595593f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -368,7 +368,7 @@ mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record) next_valid = true; } - mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TNUMT_TUNNEL_PORT_NVE, + mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE, mc_record->kvdl_index, next_valid, next_kvdl_index, mc_record->num_entries); @@ -798,11 +798,11 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, ops = nve->nve_ops_arr[params->type]; - if (!ops->can_offload(nve, params->dev, extack)) + if (!ops->can_offload(nve, params, extack)) return -EINVAL; memset(&config, 0, sizeof(config)); - ops->nve_config(nve, params->dev, &config); + ops->nve_config(nve, params, &config); if (nve->num_nve_tunnels && memcmp(&config, &nve->config, sizeof(config))) { NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 12f664f42f21..2796d3659979 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -18,6 +18,7 @@ struct mlxsw_sp_nve_config { u32 ul_tb_id; enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr ul_sip; + u16 ethertype; }; struct mlxsw_sp_nve { @@ -35,10 +36,10 @@ struct mlxsw_sp_nve { struct mlxsw_sp_nve_ops { enum mlxsw_sp_nve_type type; bool (*can_offload)(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack); void (*nve_config)(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config); int (*init)(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 05517c7feaa5..3e2bb22e9ca6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -22,10 +22,10 @@ VXLAN_F_LEARN) static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) { - struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev
*vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; if (cfg->saddr.sa.sa_family != AF_INET) { @@ -86,11 +86,23 @@ static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return true; } +static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, + const struct mlxsw_sp_nve_params *params, + struct netlink_ext_ack *extack) +{ + if (params->ethertype == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: 802.1ad bridge is not supported with VxLAN"); + return false; + } + + return mlxsw_sp_nve_vxlan_can_offload(nve, params, extack); +} + static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, + const struct mlxsw_sp_nve_params *params, struct mlxsw_sp_nve_config *config) { - struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_dev *vxlan = netdev_priv(params->dev); struct vxlan_config *cfg = &vxlan->cfg; config->type = MLXSW_SP_NVE_TYPE_VXLAN; @@ -101,6 +113,7 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->ul_proto = MLXSW_SP_L3_PROTO_IPV4; config->ul_sip.addr4 = cfg->saddr.sin.sin_addr.s_addr; config->udp_dport = cfg->dst_port; + config->ethertype = params->ethertype; } static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, @@ -286,7 +299,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni) const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, - .can_offload = mlxsw_sp_nve_vxlan_can_offload, + .can_offload = mlxsw_sp1_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp1_nve_vxlan_init, .fini = mlxsw_sp1_nve_vxlan_fini, @@ -299,16 +312,35 @@ static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, { char tnpc_pl[MLXSW_REG_TNPC_LEN]; - mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TUNNEL_PORT_NVE, learning_en); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl); } static int +mlxsw_sp2_nve_decap_ethertype_set(struct mlxsw_sp *mlxsw_sp, u16 ethertype) +{ + char spvid_pl[MLXSW_REG_SPVID_LEN] = {}; + u8 sver_type; + int err; + + mlxsw_reg_spvid_tport_set(spvid_pl, true); + mlxsw_reg_spvid_local_port_set(spvid_pl, + MLXSW_REG_TUNNEL_PORT_NVE); + err = mlxsw_sp_ethtype_to_sver_type(ethertype, &sver_type); + if (err) + return err; + + mlxsw_reg_spvid_et_vlan_set(spvid_pl, sver_type); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); +} + +static int mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_config *config) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + char spvtr_pl[MLXSW_REG_SPVTR_LEN]; u16 ul_rif_index; int err; @@ -329,8 +361,25 @@ mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tngcr_write; + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_ALWAYS_PUSH_VLAN); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); + if (err) + goto err_spvtr_write; + + err = mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, config->ethertype); + if (err) + goto err_decap_ethertype_set; + return 0; +err_decap_ethertype_set: + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); +err_spvtr_write: + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); 
err_tngcr_write: mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); err_vxlan_learning_set: @@ -340,8 +389,14 @@ err_vxlan_learning_set: static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) { + char spvtr_pl[MLXSW_REG_SPVTR_LEN]; char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + /* Set default EtherType */ + mlxsw_sp2_nve_decap_ethertype_set(mlxsw_sp, ETH_P_8021Q); + mlxsw_reg_spvtr_pack(spvtr_pl, true, MLXSW_REG_TUNNEL_PORT_NVE, + MLXSW_REG_SPVTR_IPVID_MODE_IEEE_COMPLIANT_PVID); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvtr), spvtr_pl); mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index ca8090a28dec..d6e9ecb14681 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -828,10 +828,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp) goto err_hashtable_init; /* Deliver these message types as PTP0. */ - message_type = BIT(MLXSW_SP_PTP_MESSAGE_TYPE_SYNC) | - BIT(MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ) | - BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ) | - BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP); + message_type = BIT(PTP_MSGTYPE_SYNC) | + BIT(PTP_MSGTYPE_DELAY_REQ) | + BIT(PTP_MSGTYPE_PDELAY_REQ) | + BIT(PTP_MSGTYPE_PDELAY_RESP); err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, message_type); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index 8c386571afce..1d43a3755285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -11,13 +11,6 @@ struct mlxsw_sp; struct mlxsw_sp_port; struct mlxsw_sp_ptp_clock; -enum { - MLXSW_SP_PTP_MESSAGE_TYPE_SYNC, - MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ, - MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ, - MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP, -}; - static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4381f8c6c3fb..41424ee909a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -352,6 +352,7 @@ enum mlxsw_sp_fib_entry_type { MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP, }; +struct mlxsw_sp_nexthop_group_info; struct mlxsw_sp_nexthop_group; struct mlxsw_sp_fib_entry; @@ -368,18 +369,71 @@ struct mlxsw_sp_fib_entry_decap { u32 tunnel_index; }; +static struct mlxsw_sp_fib_entry_priv * +mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops) +{ + struct mlxsw_sp_fib_entry_priv *priv; + + if (!ll_ops->fib_entry_priv_size) + /* No need to have priv */ + return NULL; + + priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + refcount_set(&priv->refcnt, 1); + return priv; +} + +static void +mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv) +{ + kfree(priv); +} + +static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv) +{ + refcount_inc(&priv->refcnt); +} + +static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv) +{ + if (!priv ||
!refcount_dec_and_test(&priv->refcnt)) + return; + mlxsw_sp_fib_entry_priv_destroy(priv); +} + +static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_entry_priv *priv) +{ + if (!priv) + return; + mlxsw_sp_fib_entry_priv_hold(priv); + list_add(&priv->list, &op_ctx->fib_entry_priv_list); +} + +static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx) +{ + struct mlxsw_sp_fib_entry_priv *priv, *tmp; + + list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list) + mlxsw_sp_fib_entry_priv_put(priv); + INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list); +} + struct mlxsw_sp_fib_entry { struct mlxsw_sp_fib_node *fib_node; enum mlxsw_sp_fib_entry_type type; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */ + struct mlxsw_sp_fib_entry_priv *priv; }; struct mlxsw_sp_fib4_entry { struct mlxsw_sp_fib_entry common; + struct fib_info *fi; u32 tb_id; - u32 prio; u8 tos; u8 type; }; @@ -409,6 +463,7 @@ struct mlxsw_sp_fib { struct mlxsw_sp_vr *vr; struct mlxsw_sp_lpm_tree *lpm_tree; enum mlxsw_sp_l3proto proto; + const struct mlxsw_sp_router_ll_ops *ll_ops; }; struct mlxsw_sp_vr { @@ -422,16 +477,45 @@ struct mlxsw_sp_vr { refcount_t ul_rif_refcnt; }; +static int mlxsw_sp_router_ll_basic_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id, + enum mlxsw_sp_l3proto proto) +{ + return 0; +} + +static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), + xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET); +} + +static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), + xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET); +} + +static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), + xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET); +} + static const struct rhashtable_params mlxsw_sp_fib_ht_params; static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, enum mlxsw_sp_l3proto proto) { + const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto]; struct mlxsw_sp_lpm_tree *lpm_tree; struct mlxsw_sp_fib *fib; int err; + err = ll_ops->init(mlxsw_sp, vr->id, proto); + if (err) + return ERR_PTR(err); + lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto]; fib = kzalloc(sizeof(*fib), GFP_KERNEL); if (!fib) @@ -443,6 +527,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp, fib->proto = proto; fib->vr = vr; fib->lpm_tree = lpm_tree; + fib->ll_ops = ll_ops; mlxsw_sp_lpm_tree_hold(lpm_tree); err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id); if (err) @@ -481,33 +566,36 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp) } static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_router_ll_ops *ll_ops, struct mlxsw_sp_lpm_tree *lpm_tree) { - char ralta_pl[MLXSW_REG_RALTA_LEN]; + char xralta_pl[MLXSW_REG_XRALTA_LEN]; - mlxsw_reg_ralta_pack(ralta_pl, true, - (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, - lpm_tree->id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); + mlxsw_reg_xralta_pack(xralta_pl, true, + (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, + lpm_tree->id); + return ll_ops->ralta_write(mlxsw_sp, 
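The priv helpers just added give each low-level-ops backend an optional per-FIB-entry blob whose size the backend declares, with refcounting so a queued operation context can keep the blob alive after the entry itself is gone. A compilable userspace sketch of the same lifetime scheme; the names are invented, and the driver uses refcount_t and kzalloc rather than a plain counter:

#include <stdlib.h>

struct entry_priv {
        unsigned int refcnt;    /* single-threaded model of refcount_t */
        char data[];            /* ll_ops->fib_entry_priv_size bytes */
};

static struct entry_priv *priv_create(size_t priv_size)
{
        struct entry_priv *priv;

        if (!priv_size)
                return NULL;    /* backends that need no priv get none */

        priv = calloc(1, sizeof(*priv) + priv_size);
        if (!priv)
                return NULL;
        priv->refcnt = 1;       /* reference owned by the FIB entry */
        return priv;
}

static void priv_hold(struct entry_priv *priv)
{
        priv->refcnt++;         /* e.g. a queued op context takes a ref */
}

static void priv_put(struct entry_priv *priv)
{
        if (!priv || --priv->refcnt)
                return;
        free(priv);             /* last reference gone */
}

int main(void)
{
        struct entry_priv *priv = priv_create(16);

        priv_hold(priv);        /* op context references the blob */
        priv_put(priv);         /* op context completed */
        priv_put(priv);         /* FIB entry destroyed */
        return 0;
}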
xralta_pl); } static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_router_ll_ops *ll_ops, struct mlxsw_sp_lpm_tree *lpm_tree) { - char ralta_pl[MLXSW_REG_RALTA_LEN]; + char xralta_pl[MLXSW_REG_XRALTA_LEN]; - mlxsw_reg_ralta_pack(ralta_pl, false, - (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, - lpm_tree->id); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); + mlxsw_reg_xralta_pack(xralta_pl, false, + (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, + lpm_tree->id); + ll_ops->ralta_write(mlxsw_sp, xralta_pl); } static int mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_router_ll_ops *ll_ops, struct mlxsw_sp_prefix_usage *prefix_usage, struct mlxsw_sp_lpm_tree *lpm_tree) { - char ralst_pl[MLXSW_REG_RALST_LEN]; + char xralst_pl[MLXSW_REG_XRALST_LEN]; u8 root_bin = 0; u8 prefix; u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD; @@ -515,19 +603,20 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) root_bin = prefix; - mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id); + mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id); mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) { if (prefix == 0) continue; - mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix, - MLXSW_REG_RALST_BIN_NO_CHILD); + mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix, + MLXSW_REG_RALST_BIN_NO_CHILD); last_prefix = prefix; } - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); + return ll_ops->ralst_write(mlxsw_sp, xralst_pl); } static struct mlxsw_sp_lpm_tree * mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_router_ll_ops *ll_ops, struct mlxsw_sp_prefix_usage *prefix_usage, enum mlxsw_sp_l3proto proto) { @@ -538,12 +627,11 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, if (!lpm_tree) return ERR_PTR(-EBUSY); lpm_tree->proto = proto; - err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree); + err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree); if (err) return ERR_PTR(err); - err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage, - lpm_tree); + err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree); if (err) goto err_left_struct_set; memcpy(&lpm_tree->prefix_usage, prefix_usage, @@ -554,14 +642,15 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, return lpm_tree; err_left_struct_set: - mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); + mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree); return ERR_PTR(err); } static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_router_ll_ops *ll_ops, struct mlxsw_sp_lpm_tree *lpm_tree) { - mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); + mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree); } static struct mlxsw_sp_lpm_tree * @@ -569,6 +658,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_prefix_usage *prefix_usage, enum mlxsw_sp_l3proto proto) { + const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto]; struct mlxsw_sp_lpm_tree *lpm_tree; int i; @@ -582,7 +672,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, return lpm_tree; } } - return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto); + return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto); } static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) @@ -593,8 +683,11 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) static void 
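The ralta/ralst conversion above is mechanical but has a purpose: callers now always pack the extended X-register layout, and the "basic" ll_ops backend forwards the legacy register embedded at a fixed offset inside it, so an alternative backend could consume the full extended payload instead. A sketch of that indirection, with made-up names and an illustrative offset rather than the real MLXSW_REG_* values:

#include <stdio.h>

#define REG_OFFSET 4    /* stands in for MLXSW_REG_XRALTA_RALTA_OFFSET */

struct router_ll_ops {
        int (*ralta_write)(const char *xralta_pl);
};

static int legacy_reg_write(const char *ralta_pl)
{
        printf("legacy RALTA write: %s\n", ralta_pl);
        return 0;
}

/* The basic backend peels the legacy payload out of the extended one. */
static int basic_ralta_write(const char *xralta_pl)
{
        return legacy_reg_write(xralta_pl + REG_OFFSET);
}

static const struct router_ll_ops basic_ops = {
        .ralta_write = basic_ralta_write,
};

int main(void)
{
        /* the first REG_OFFSET bytes model the extended-register header */
        char xralta_pl[] = "XHDRralta-payload";

        return basic_ops.ralta_write(xralta_pl);
}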
mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lpm_tree *lpm_tree) { + const struct mlxsw_sp_router_ll_ops *ll_ops = + mlxsw_sp->router->proto_ll_ops[lpm_tree->proto]; + if (--lpm_tree->ref_count == 0) - mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); + mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree); } #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */ @@ -684,23 +777,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_fib *fib, u8 tree_id) { - char raltb_pl[MLXSW_REG_RALTB_LEN]; + char xraltb_pl[MLXSW_REG_XRALTB_LEN]; - mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, - (enum mlxsw_reg_ralxx_protocol) fib->proto, - tree_id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); + mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, + tree_id); + return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl); } static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_fib *fib) { - char raltb_pl[MLXSW_REG_RALTB_LEN]; + char xraltb_pl[MLXSW_REG_XRALTB_LEN]; /* Bind to tree 0 which is default */ - mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, - (enum mlxsw_reg_ralxx_protocol) fib->proto, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); + mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id, + (enum mlxsw_reg_ralxx_protocol) fib->proto, 0); + return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl); } static u32 mlxsw_sp_fix_tb_id(u32 tb_id) @@ -1270,21 +1363,33 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp, /* Given decap parameters, find the corresponding IPIP entry. */ static struct mlxsw_sp_ipip_entry * -mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, - const struct net_device *ul_dev, +mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex, enum mlxsw_sp_l3proto ul_proto, union mlxsw_sp_l3addr ul_dip) { - struct mlxsw_sp_ipip_entry *ipip_entry; + struct mlxsw_sp_ipip_entry *ipip_entry = NULL; + struct net_device *ul_dev; + + rcu_read_lock(); + + ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex); + if (!ul_dev) + goto out_unlock; list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, ipip_list_node) if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev, ul_proto, ul_dip, ipip_entry)) - return ipip_entry; + goto out_unlock; + + rcu_read_unlock(); return NULL; + +out_unlock: + rcu_read_unlock(); + return ipip_entry; } static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, @@ -1370,11 +1475,7 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_ops *ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; - /* For deciding whether decap should be offloaded, we don't care about - * overlay protocol, so ask whether either one is supported. 
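mlxsw_sp_ipip_entry_find_by_decap above now receives an ifindex instead of a device pointer and resolves it under rcu_read_lock() via dev_get_by_index_rcu(), so the device is only dereferenced inside the read-side section. A toy model of that discipline; the RCU markers are no-ops here and only the copy-out-before-unlock shape matters:

#include <stddef.h>
#include <stdio.h>

struct net_device { int ifindex; };

static struct net_device devs[] = { { .ifindex = 1 }, { .ifindex = 7 } };

/* No-op stand-ins for the RCU read-side markers. */
static void rcu_read_lock(void) { }
static void rcu_read_unlock(void) { }

static struct net_device *dev_get_by_index_rcu(int ifindex)
{
        for (size_t i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
                if (devs[i].ifindex == ifindex)
                        return &devs[i];
        return NULL;
}

/* Matching logic reduced to a placeholder predicate. */
static int entry_matches(const struct net_device *ul_dev)
{
        return ul_dev->ifindex == 7;
}

static int find_entry_by_decap(int ul_dev_ifindex)
{
        struct net_device *ul_dev;
        int found = -1;

        rcu_read_lock();
        ul_dev = dev_get_by_index_rcu(ul_dev_ifindex);
        if (ul_dev && entry_matches(ul_dev))
                found = ul_dev->ifindex;        /* copy out what we need */
        rcu_read_unlock();      /* ul_dev must not be used past this point */
        return found;
}

int main(void)
{
        printf("found entry for ifindex 7: %d\n", find_entry_by_decap(7));
        return 0;
}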
- */ - return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) || - ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6); + return ops->can_offload(mlxsw_sp, ol_dev); } static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp, @@ -2749,10 +2850,11 @@ struct mlxsw_sp_nexthop { struct list_head neigh_list_node; /* member of neigh entry list */ struct list_head rif_list_node; struct list_head router_list_node; - struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group - * this belongs to - */ + struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group + * this nexthop belongs to + */ struct rhash_head ht_node; + struct neigh_table *neigh_tbl; struct mlxsw_sp_nexthop_key key; unsigned char gw_addr[sizeof(struct in6_addr)]; int ifindex; @@ -2766,9 +2868,10 @@ struct mlxsw_sp_nexthop { offloaded:1, /* set in case the neigh is actually put into * KVD linear area of this group. */ - update:1; /* set indicates that MAC of this neigh should be + update:1, /* set indicates that MAC of this neigh should be * updated in HW */ + discard:1; /* nexthop is programmed to discard packets */ enum mlxsw_sp_nexthop_type type; union { struct mlxsw_sp_neigh_entry *neigh_entry; @@ -2778,21 +2881,54 @@ struct mlxsw_sp_nexthop { bool counter_valid; }; -struct mlxsw_sp_nexthop_group { - void *priv; - struct rhash_head ht_node; - struct list_head fib_list; /* list of fib entries that use this group */ - struct neigh_table *neigh_tbl; - u8 adj_index_valid:1, - gateway:1; /* routes using the group use a gateway */ +enum mlxsw_sp_nexthop_group_type { + MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4, + MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6, + MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ, +}; + +struct mlxsw_sp_nexthop_group_info { + struct mlxsw_sp_nexthop_group *nh_grp; u32 adj_index; u16 ecmp_size; u16 count; int sum_norm_weight; + u8 adj_index_valid:1, + gateway:1; /* routes using the group use a gateway */ struct mlxsw_sp_nexthop nexthops[0]; #define nh_rif nexthops[0].rif }; +struct mlxsw_sp_nexthop_group_vr_key { + u16 vr_id; + enum mlxsw_sp_l3proto proto; +}; + +struct mlxsw_sp_nexthop_group_vr_entry { + struct list_head list; /* member in vr_list */ + struct rhash_head ht_node; /* member in vr_ht */ + refcount_t ref_count; + struct mlxsw_sp_nexthop_group_vr_key key; +}; + +struct mlxsw_sp_nexthop_group { + struct rhash_head ht_node; + struct list_head fib_list; /* list of fib entries that use this group */ + union { + struct { + struct fib_info *fi; + } ipv4; + struct { + u32 id; + } obj; + }; + struct mlxsw_sp_nexthop_group_info *nhgi; + struct list_head vr_list; + struct rhashtable vr_ht; + enum mlxsw_sp_nexthop_group_type type; + bool can_destroy; +}; + void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { @@ -2858,18 +2994,18 @@ unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, u32 *p_adj_size, u32 *p_adj_hash_index) { - struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; + struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi; u32 adj_hash_index = 0; int i; - if (!nh->offloaded || !nh_grp->adj_index_valid) + if (!nh->offloaded || !nhgi->adj_index_valid) return -EINVAL; - *p_adj_index = nh_grp->adj_index; - *p_adj_size = nh_grp->ecmp_size; + *p_adj_index = nhgi->adj_index; + *p_adj_size = nhgi->ecmp_size; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + struct 
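The new mlxsw_sp_nexthop_group_info above carries the flexible nexthops[] array that used to live in the group itself, so later hunks can swap the group header's info pointer without touching the array. A sketch of the one-block allocation pattern, with struct_size() reduced to plain arithmetic (the kernel macro additionally guards against overflow):

#include <stdio.h>
#include <stdlib.h>

struct nexthop { int ifindex; int weight; };

struct nexthop_group_info {
        unsigned int count;
        struct nexthop nexthops[];      /* 'count' trailing entries */
};

/* Plain-arithmetic stand-in for the kernel's overflow-checking macro. */
#define struct_size(p, member, n) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

static struct nexthop_group_info *nhgi_alloc(unsigned int nhs)
{
        struct nexthop_group_info *nhgi;

        nhgi = calloc(1, struct_size(nhgi, nexthops, nhs));
        if (!nhgi)
                return NULL;
        nhgi->count = nhs;
        return nhgi;
}

int main(void)
{
        struct nexthop_group_info *nhgi = nhgi_alloc(4);

        if (!nhgi)
                return 1;
        nhgi->nexthops[3].weight = 1;   /* header and array share one block */
        printf("group with %u nexthops\n", nhgi->count);
        free(nhgi);
        return 0;
}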
mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i]; if (nh_iter == nh) break; @@ -2888,11 +3024,11 @@ struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh) bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) { - struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; + struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi; int i; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i]; if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP) return true; @@ -2900,17 +3036,107 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) return false; } -static struct fib_info * -mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) +bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh) +{ + return nh->discard; +} + +static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key), + .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node), + .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key), + .automatic_shrinking = true, +}; + +static struct mlxsw_sp_nexthop_group_vr_entry * +mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib *fib) +{ + struct mlxsw_sp_nexthop_group_vr_key key; + + memset(&key, 0, sizeof(key)); + key.vr_id = fib->vr->id; + key.proto = fib->proto; + return rhashtable_lookup_fast(&nh_grp->vr_ht, &key, + mlxsw_sp_nexthop_group_vr_ht_params); +} + +static int +mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib *fib) +{ + struct mlxsw_sp_nexthop_group_vr_entry *vr_entry; + int err; + + vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL); + if (!vr_entry) + return -ENOMEM; + + vr_entry->key.vr_id = fib->vr->id; + vr_entry->key.proto = fib->proto; + refcount_set(&vr_entry->ref_count, 1); + + err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node, + mlxsw_sp_nexthop_group_vr_ht_params); + if (err) + goto err_hashtable_insert; + + list_add(&vr_entry->list, &nh_grp->vr_list); + + return 0; + +err_hashtable_insert: + kfree(vr_entry); + return err; +} + +static void +mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop_group_vr_entry *vr_entry) +{ + list_del(&vr_entry->list); + rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node, + mlxsw_sp_nexthop_group_vr_ht_params); + kfree(vr_entry); +} + +static int +mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib *fib) { - return nh_grp->priv; + struct mlxsw_sp_nexthop_group_vr_entry *vr_entry; + + vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib); + if (vr_entry) { + refcount_inc(&vr_entry->ref_count); + return 0; + } + + return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib); +} + +static void +mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib *fib) +{ + struct mlxsw_sp_nexthop_group_vr_entry *vr_entry; + + vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib); + if (WARN_ON_ONCE(!vr_entry)) + return; + + if (!refcount_dec_and_test(&vr_entry->ref_count)) + return; + + mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry); } struct mlxsw_sp_nexthop_group_cmp_arg { - enum mlxsw_sp_l3proto proto; + enum mlxsw_sp_nexthop_group_type type; union { struct 
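The vr_entry helpers above let a nexthop group track which {VR, protocol} pairs reference it, taking a reference per linked FIB and freeing the entry on the last unlink; later hunks iterate this list to update adjacency indexes per VR instead of walking every FIB entry. A linked-list stand-in for the rhashtable-backed kernel version, with invented names:

#include <stdlib.h>

struct vr_entry {
        struct vr_entry *next;
        unsigned int ref_count;
        unsigned short vr_id;
        int proto;
};

static struct vr_entry *vr_lookup(struct vr_entry *head,
                                  unsigned short vr_id, int proto)
{
        for (; head; head = head->next)
                if (head->vr_id == vr_id && head->proto == proto)
                        return head;
        return NULL;
}

static int vr_link(struct vr_entry **head, unsigned short vr_id, int proto)
{
        struct vr_entry *e = vr_lookup(*head, vr_id, proto);

        if (e) {
                e->ref_count++;         /* another FIB in the same VR */
                return 0;
        }

        e = calloc(1, sizeof(*e));
        if (!e)
                return -1;
        e->vr_id = vr_id;
        e->proto = proto;
        e->ref_count = 1;
        e->next = *head;
        *head = e;
        return 0;
}

static void vr_unlink(struct vr_entry **head, unsigned short vr_id, int proto)
{
        struct vr_entry *e = vr_lookup(*head, vr_id, proto), **p;

        if (!e || --e->ref_count)
                return;
        for (p = head; *p != e; p = &(*p)->next)
                ;
        *p = e->next;                   /* last user gone: drop the entry */
        free(e);
}

int main(void)
{
        struct vr_entry *head = NULL;

        vr_link(&head, 1, 4);
        vr_link(&head, 1, 4);
        vr_unlink(&head, 1, 4);
        vr_unlink(&head, 1, 4);
        return head != NULL;
}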
fib_info *fi; struct mlxsw_sp_fib6_entry *fib6_entry; + u32 id; }; }; @@ -2921,10 +3147,10 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, { int i; - for (i = 0; i < nh_grp->count; i++) { + for (i = 0; i < nh_grp->nhgi->count; i++) { const struct mlxsw_sp_nexthop *nh; - nh = &nh_grp->nexthops[i]; + nh = &nh_grp->nhgi->nexthops[i]; if (nh->ifindex == ifindex && nh->nh_weight == weight && ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) return true; @@ -2939,7 +3165,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; - if (nh_grp->count != fib6_entry->nrt6) + if (nh_grp->nhgi->count != fib6_entry->nrt6) return false; list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { @@ -2964,24 +3190,23 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; const struct mlxsw_sp_nexthop_group *nh_grp = ptr; - switch (cmp_arg->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); - case MLXSW_SP_L3_PROTO_IPV6: + if (nh_grp->type != cmp_arg->type) + return 1; + + switch (cmp_arg->type) { + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4: + return cmp_arg->fi != nh_grp->ipv4.fi; + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6: return !mlxsw_sp_nexthop6_group_cmp(nh_grp, cmp_arg->fib6_entry); + case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ: + return cmp_arg->id != nh_grp->obj.id; default: WARN_ON(1); return 1; } } -static int -mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp) -{ - return nh_grp->neigh_tbl->family; -} - static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) { const struct mlxsw_sp_nexthop_group *nh_grp = data; @@ -2990,18 +3215,20 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) unsigned int val; int i; - switch (mlxsw_sp_nexthop_group_type(nh_grp)) { - case AF_INET: - fi = mlxsw_sp_nexthop4_group_fi(nh_grp); + switch (nh_grp->type) { + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4: + fi = nh_grp->ipv4.fi; return jhash(&fi, sizeof(fi), seed); - case AF_INET6: - val = nh_grp->count; - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6: + val = nh_grp->nhgi->count; + for (i = 0; i < nh_grp->nhgi->count; i++) { + nh = &nh_grp->nhgi->nexthops[i]; val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed); val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed); } return jhash(&val, sizeof(val), seed); + case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ: + return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed); default: WARN_ON(1); return 0; @@ -3031,11 +3258,13 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed) { const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; - switch (cmp_arg->proto) { - case MLXSW_SP_L3_PROTO_IPV4: + switch (cmp_arg->type) { + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4: return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); - case MLXSW_SP_L3_PROTO_IPV6: + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6: return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed); + case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ: + return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed); default: WARN_ON(1); return 0; @@ -3052,8 +3281,8 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { - if 
(mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && - !nh_grp->gateway) + if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 && + !nh_grp->nhgi->gateway) return 0; return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht, @@ -3064,8 +3293,8 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { - if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && - !nh_grp->gateway) + if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 && + !nh_grp->nhgi->gateway) return; rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht, @@ -3079,7 +3308,7 @@ mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; - cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4; + cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4; cmp_arg.fi = fi; return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &cmp_arg, @@ -3092,7 +3321,7 @@ mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; - cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6; + cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6; cmp_arg.fib6_entry = fib6_entry; return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &cmp_arg, @@ -3128,7 +3357,8 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib *fib, + enum mlxsw_sp_l3proto proto, + u16 vr_id, u32 adj_index, u16 ecmp_size, u32 new_adj_index, u16 new_ecmp_size) @@ -3136,8 +3366,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, char raleu_pl[MLXSW_REG_RALEU_LEN]; mlxsw_reg_raleu_pack(raleu_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, - fib->vr->id, adj_index, ecmp_size, new_adj_index, + (enum mlxsw_reg_ralxx_protocol) proto, vr_id, + adj_index, ecmp_size, new_adj_index, new_ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); } @@ -3146,23 +3376,31 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, u32 old_adj_index, u16 old_ecmp_size) { - struct mlxsw_sp_fib_entry *fib_entry; - struct mlxsw_sp_fib *fib = NULL; + struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + struct mlxsw_sp_nexthop_group_vr_entry *vr_entry; int err; - list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { - if (fib == fib_entry->fib_node->fib) - continue; - fib = fib_entry->fib_node->fib; - err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib, + list_for_each_entry(vr_entry, &nh_grp->vr_list, list) { + err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, + vr_entry->key.proto, + vr_entry->key.vr_id, old_adj_index, old_ecmp_size, - nh_grp->adj_index, - nh_grp->ecmp_size); + nhgi->adj_index, + nhgi->ecmp_size); if (err) - return err; + goto err_mass_update_vr; } return 0; + +err_mass_update_vr: + list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list) + mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto, + vr_entry->key.vr_id, + nhgi->adj_index, + nhgi->ecmp_size, + old_adj_index, old_ecmp_size); + return err; } static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, @@ -3173,8 +3411,12 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, MLXSW_REG_RATR_TYPE_ETHERNET, - adj_index, neigh_entry->rif); - 
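mlxsw_sp_adj_index_mass_update previously deduplicated FIBs on the fly and had no way to undo a partial update; with the per-group VR list it now rolls back by revisiting the already-updated VRs in reverse (list_for_each_entry_continue_reverse) and rewriting the old index. The same control flow, reduced to an array walk with a hypothetical update function:

#include <stdio.h>

/* Fails for one VR so the rollback path is exercised. */
static int update_vr(int vr_id, int adj_index)
{
        return (vr_id == 3 && adj_index == 20) ? -1 : 0;
}

static int mass_update(const int *vr_ids, int n, int old_index, int new_index)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = update_vr(vr_ids[i], new_index);
                if (err)
                        goto err_mass_update_vr;
        }
        return 0;

err_mass_update_vr:
        /* Walk the already-updated VRs back to the old index, in reverse. */
        while (--i >= 0)
                update_vr(vr_ids[i], old_index);
        return err;
}

int main(void)
{
        int vr_ids[] = { 1, 2, 3 };

        return mass_update(vr_ids, 3, 10, 20) ? 1 : 0;
}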
mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + adj_index, nh->rif->rif_index); + if (nh->discard) + mlxsw_reg_ratr_trap_action_set(ratr_pl, + MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS); + else + mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); if (nh->counter_valid) mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); else @@ -3229,15 +3471,15 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop_group_info *nhgi, bool reallocate) { - u32 adj_index = nh_grp->adj_index; /* base */ + u32 adj_index = nhgi->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; int i; - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + nh = &nhgi->nexthops[i]; if (!nh->should_offload) { nh->offloaded = 0; @@ -3337,13 +3579,13 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, } static void -mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) +mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi) { int i, g = 0, sum_norm_weight = 0; struct mlxsw_sp_nexthop *nh; - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + nh = &nhgi->nexthops[i]; if (!nh->should_offload) continue; @@ -3353,8 +3595,8 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) g = nh->nh_weight; } - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + nh = &nhgi->nexthops[i]; if (!nh->should_offload) continue; @@ -3362,18 +3604,18 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) sum_norm_weight += nh->norm_nh_weight; } - nh_grp->sum_norm_weight = sum_norm_weight; + nhgi->sum_norm_weight = sum_norm_weight; } static void -mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp) +mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi) { - int total = nh_grp->sum_norm_weight; - u16 ecmp_size = nh_grp->ecmp_size; int i, weight = 0, lower_bound = 0; + int total = nhgi->sum_norm_weight; + u16 ecmp_size = nhgi->ecmp_size; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; int upper_bound; if (!nh->should_offload) @@ -3395,8 +3637,8 @@ mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, { int i; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + for (i = 0; i < nh_grp->nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i]; if (nh->offloaded) nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD; @@ -3439,39 +3681,59 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, } static void +mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + /* Do not update the flags if the nexthop group is being destroyed + * since: + * 1. The nexthop object is being deleted, in which case the flags are + * irrelevant. + * 2. The nexthop group was replaced by a newer group, in which case + * the flags of the nexthop object were already updated based on the + * new group.
+ */ + if (nh_grp->can_destroy) + return; + + nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id, + nh_grp->nhgi->adj_index_valid, false); +} + +static void mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { - switch (mlxsw_sp_nexthop_group_type(nh_grp)) { - case AF_INET: + switch (nh_grp->type) { + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4: mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp); break; - case AF_INET6: + case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6: mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp); break; + case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ: + mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp); + break; } } -static void +static int mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; u16 ecmp_size, old_ecmp_size; struct mlxsw_sp_nexthop *nh; bool offload_change = false; u32 adj_index; bool old_adj_index_valid; + int i, err2, err = 0; u32 old_adj_index; - int i; - int err; - if (!nh_grp->gateway) { - mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); - return; - } + if (!nhgi->gateway) + return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + for (i = 0; i < nhgi->count; i++) { + nh = &nhgi->nexthops[i]; if (nh->should_offload != nh->offloaded) { offload_change = true; @@ -3483,21 +3745,21 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* Nothing was added or removed, so no need to reallocate. Just * update MAC on existing adjacency indexes. */ - err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false); + err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; } - return; + return 0; } - mlxsw_sp_nexthop_group_normalize(nh_grp); - if (!nh_grp->sum_norm_weight) + mlxsw_sp_nexthop_group_normalize(nhgi); + if (!nhgi->sum_norm_weight) /* No neigh of this group is connected so we just set * the trap and let everything flow through kernel. */ goto set_trap; - ecmp_size = nh_grp->sum_norm_weight; + ecmp_size = nhgi->sum_norm_weight; err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size); if (err) /* No valid allocation size available.
*/ @@ -3512,14 +3774,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n"); goto set_trap; } - old_adj_index_valid = nh_grp->adj_index_valid; - old_adj_index = nh_grp->adj_index; - old_ecmp_size = nh_grp->ecmp_size; - nh_grp->adj_index_valid = 1; - nh_grp->adj_index = adj_index; - nh_grp->ecmp_size = ecmp_size; - mlxsw_sp_nexthop_group_rebalance(nh_grp); - err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true); + old_adj_index_valid = nhgi->adj_index_valid; + old_adj_index = nhgi->adj_index; + old_ecmp_size = nhgi->ecmp_size; + nhgi->adj_index_valid = 1; + nhgi->adj_index = adj_index; + nhgi->ecmp_size = ecmp_size; + mlxsw_sp_nexthop_group_rebalance(nhgi); + err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -3536,7 +3798,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n"); goto set_trap; } - return; + return 0; } err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, @@ -3548,22 +3810,23 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, goto set_trap; } - return; + return 0; set_trap: - old_adj_index_valid = nh_grp->adj_index_valid; - nh_grp->adj_index_valid = 0; - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; + old_adj_index_valid = nhgi->adj_index_valid; + nhgi->adj_index_valid = 0; + for (i = 0; i < nhgi->count; i++) { + nh = &nhgi->nexthops[i]; nh->offloaded = 0; } - err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); - if (err) + err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); + if (err2) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp); if (old_adj_index_valid) mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, - nh_grp->ecmp_size, nh_grp->adj_index); + nhgi->ecmp_size, nhgi->adj_index); + return err; } static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, @@ -3589,10 +3852,9 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, nh = list_first_entry(&neigh_entry->nexthop_list, struct mlxsw_sp_nexthop, neigh_list_node); - n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); + n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (!n) { - n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, - nh->rif->dev); + n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); @@ -3615,7 +3877,7 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp, neigh_release(old_n); neigh_clone(n); __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected); - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); } neigh_release(n); @@ -3652,7 +3914,7 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, list_for_each_entry(nh, &neigh_entry->nexthop_list, neigh_list_node) { __mlxsw_sp_nexthop_neigh_update(nh, removing); - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); } } @@ -3683,7 +3945,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, u8 nud_state, dead; int err; - if (!nh->nh_grp->gateway || nh->neigh_entry) + if (!nh->nhgi->gateway || nh->neigh_entry) 
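The normalize/rebalance pair a few hunks up (now operating on the group info) partitions the adjacency group among offloaded nexthops in proportion to their weights, using a running rounded upper bound so the spans exactly tile ecmp_size entries. The arithmetic in isolation, with hypothetical weights:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        const int weights[] = { 3, 1, 2 };      /* normalized weights */
        int total = 6, ecmp_size = 8;
        int weight = 0, lower_bound = 0;

        for (int i = 0; i < 3; i++) {
                int upper_bound;

                weight += weights[i];
                upper_bound = DIV_ROUND_UP(ecmp_size * weight, total);
                printf("nh%d: %d adjacency entries\n",
                       i, upper_bound - lower_bound);
                lower_bound = upper_bound;
        }
        /* spans are 4, 2 and 2 entries: they sum to exactly ecmp_size */
        return 0;
}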
return 0; /* Take a reference of neigh here ensuring that neigh would @@ -3691,10 +3953,9 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. */ - n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); + n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (!n) { - n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, - nh->rif->dev); + n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); @@ -3775,7 +4036,7 @@ static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, { bool removing; - if (!nh->nh_grp->gateway || nh->ipip_entry) + if (!nh->nhgi->gateway || nh->ipip_entry) return; nh->ipip_entry = ipip_entry; @@ -3807,27 +4068,11 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp, mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt); } -static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) -{ - switch (nh->type) { - case MLXSW_SP_NEXTHOP_TYPE_ETH: - mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); - mlxsw_sp_nexthop_rif_fini(nh); - break; - case MLXSW_SP_NEXTHOP_TYPE_IPIP: - mlxsw_sp_nexthop_rif_fini(nh); - mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); - break; - } -} - -static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, + const struct net_device *dev) { const struct mlxsw_sp_ipip_ops *ipip_ops; - struct net_device *dev = fib_nh->fib_nh_dev; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_rif *rif; int err; @@ -3835,8 +4080,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); if (ipip_entry) { ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; - if (ipip_ops->can_offload(mlxsw_sp, dev, - MLXSW_SP_L3_PROTO_IPV4)) { + if (ipip_ops->can_offload(mlxsw_sp, dev)) { nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); return 0; @@ -3860,10 +4104,19 @@ err_neigh_init: return err; } -static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { - mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); + switch (nh->type) { + case MLXSW_SP_NEXTHOP_TYPE_ETH: + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); + break; + case MLXSW_SP_NEXTHOP_TYPE_IPIP: + mlxsw_sp_nexthop_rif_fini(nh); + mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); + break; + } } static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, @@ -3875,7 +4128,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, struct in_device *in_dev; int err; - nh->nh_grp = nh_grp; + nh->nhgi = nh_grp->nhgi; nh->key.fib_nh = fib_nh; #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->nh_weight = fib_nh->fib_nh_weight; @@ -3883,6 +4136,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, nh->nh_weight = 1; #endif memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4)); + nh->neigh_tbl = &arp_tbl; err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; @@ -3892,6 +4146,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, if (!dev) return 0; + nh->ifindex = dev->ifindex; rcu_read_lock(); in_dev = 
__in_dev_get_rcu(dev); @@ -3902,7 +4157,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, } rcu_read_unlock(); - err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); + err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev); if (err) goto err_nexthop_neigh_init; @@ -3916,7 +4171,7 @@ err_nexthop_neigh_init: static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { - mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); @@ -3938,14 +4193,14 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, switch (event) { case FIB_EVENT_NH_ADD: - mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh); + mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev); break; case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); break; } - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); } static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, @@ -3968,7 +4223,7 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, } __mlxsw_sp_nexthop_neigh_update(nh, removing); - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); } } @@ -3991,10 +4246,450 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) { mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp); } } +static int +mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_single_info *nh, + struct netlink_ext_ack *extack) +{ + int err = -EINVAL; + + if (nh->is_fdb) + NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported"); + else if (nh->has_encap) + NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported"); + else + err = 0; + + return err; +} + +static int +mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_grp_info *nh_grp, + struct netlink_ext_ack *extack) +{ + int i; + + if (nh_grp->is_fdb) { + NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported"); + return -EINVAL; + } + + for (i = 0; i < nh_grp->num_nh; i++) { + const struct nh_notifier_single_info *nh; + int err; + + nh = &nh_grp->nh_entries[i].nh; + err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, + extack); + if (err) + return err; + + /* Device only nexthops with an IPIP device are programmed as + * encapsulating adjacency entries. 
+ */ + if (!nh->gw_family && !nh->is_reject && + !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) { + NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway"); + return -EINVAL; + } + } + + return 0; +} + +static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp, + unsigned long event, + struct nh_notifier_info *info) +{ + if (event != NEXTHOP_EVENT_REPLACE) + return 0; + + if (!info->is_grp) + return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh, + info->extack); + return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp, + info->extack); +} + +static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp, + const struct nh_notifier_info *info) +{ + const struct net_device *dev; + + if (info->is_grp) + /* Already validated earlier. */ + return true; + + dev = info->nh->dev; + return info->nh->gw_family || info->nh->is_reject || + mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); +} + +static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + u16 lb_rif_index = mlxsw_sp->router->lb_rif_index; + + nh->discard = 1; + nh->should_offload = 1; + /* While nexthops that discard packets do not forward packets + * via an egress RIF, they still need to be programmed using a + * valid RIF, so use the loopback RIF created during init. + */ + nh->rif = mlxsw_sp->router->rifs[lb_rif_index]; +} + +static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + nh->rif = NULL; + nh->should_offload = 0; +} + +static int +mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct nh_notifier_single_info *nh_obj, int weight) +{ + struct net_device *dev = nh_obj->dev; + int err; + + nh->nhgi = nh_grp->nhgi; + nh->nh_weight = weight; + + switch (nh_obj->gw_family) { + case AF_INET: + memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4)); + nh->neigh_tbl = &arp_tbl; + break; + case AF_INET6: + memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6)); +#if IS_ENABLED(CONFIG_IPV6) + nh->neigh_tbl = &nd_tbl; +#endif + break; + } + + mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); + list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); + nh->ifindex = dev->ifindex; + + err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev); + if (err) + goto err_type_init; + + if (nh_obj->is_reject) + mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh); + + return 0; + +err_type_init: + list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); + return err; +} + +static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + if (nh->discard) + mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); + list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); +} + +static int +mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct nh_notifier_info *info) +{ + unsigned int nhs = info->is_grp ? 
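The validators above reject unsupported nexthop-object attributes with a specific extack message before any driver state is touched, so a refused replace leaves both the kernel and the hardware unchanged. A sketch of that shape, with a plain string pointer standing in for netlink extack:

#include <stdio.h>
#include <errno.h>

struct nh_info { int is_fdb; int has_encap; };

static int nh_single_validate(const struct nh_info *nh, const char **extack)
{
        int err = -EINVAL;

        if (nh->is_fdb)
                *extack = "FDB nexthops are not supported";
        else if (nh->has_encap)
                *extack = "Encapsulating nexthops are not supported";
        else
                err = 0;
        return err;
}

int main(void)
{
        struct nh_info nh = { .has_encap = 1 };
        const char *msg = NULL;

        if (nh_single_validate(&nh, &msg))
                fprintf(stderr, "rejected: %s\n", msg);
        return 0;
}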
info->nh_grp->num_nh : 1; + struct mlxsw_sp_nexthop_group_info *nhgi; + struct mlxsw_sp_nexthop *nh; + int err, i; + + nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL); + if (!nhgi) + return -ENOMEM; + nh_grp->nhgi = nhgi; + nhgi->nh_grp = nh_grp; + nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info); + nhgi->count = nhs; + for (i = 0; i < nhgi->count; i++) { + struct nh_notifier_single_info *nh_obj; + int weight; + + nh = &nhgi->nexthops[i]; + if (info->is_grp) { + nh_obj = &info->nh_grp->nh_entries[i].nh; + weight = info->nh_grp->nh_entries[i].weight; + } else { + nh_obj = info->nh; + weight = 1; + } + err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, + weight); + if (err) + goto err_nexthop_obj_init; + } + err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + if (err) { + NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device"); + goto err_group_refresh; + } + + return 0; + +err_group_refresh: + i = nhgi->count; +err_nexthop_obj_init: + for (i--; i >= 0; i--) { + nh = &nhgi->nexthops[i]; + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); + } + kfree(nhgi); + return err; +} + +static void +mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + int i; + + for (i = nhgi->count - 1; i >= 0; i--) { + struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; + + mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON_ONCE(nhgi->adj_index_valid); + kfree(nhgi); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp, + struct nh_notifier_info *info) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + int err; + + nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->vr_list); + err = rhashtable_init(&nh_grp->vr_ht, + &mlxsw_sp_nexthop_group_vr_ht_params); + if (err) + goto err_nexthop_group_vr_ht_init; + INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ; + nh_grp->obj.id = info->id; + + err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info); + if (err) + goto err_nexthop_group_info_init; + + nh_grp->can_destroy = false; + + return nh_grp; + +err_nexthop_group_info_init: + rhashtable_destroy(&nh_grp->vr_ht); +err_nexthop_group_vr_ht_init: + kfree(nh_grp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + if (!nh_grp->can_destroy) + return; + mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp); + WARN_ON_ONCE(!list_empty(&nh_grp->fib_list)); + WARN_ON_ONCE(!list_empty(&nh_grp->vr_list)); + rhashtable_destroy(&nh_grp->vr_ht); + kfree(nh_grp); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id) +{ + struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + + cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ; + cmp_arg.id = id; + return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, + &cmp_arg, + mlxsw_sp_nexthop_group_ht_params); +} + +static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); +} + +static int +mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop_group *old_nh_grp, + 
struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi; + struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi; + int err; + + old_nh_grp->nhgi = new_nhgi; + new_nhgi->nh_grp = old_nh_grp; + nh_grp->nhgi = old_nhgi; + old_nhgi->nh_grp = nh_grp; + + if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) { + /* Both the old adjacency index and the new one are valid. + * Routes are currently using the old one. Tell the device to + * replace the old adjacency index with the new one. + */ + err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp, + old_nhgi->adj_index, + old_nhgi->ecmp_size); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one"); + goto err_out; + } + } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) { + /* The old adjacency index is valid, while the new one is not. + * Iterate over all the routes using the group and change them + * to trap packets to the CPU. + */ + err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets"); + goto err_out; + } + } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) { + /* The old adjacency index is invalid, while the new one is. + * Iterate over all the routes using the group and change them + * to forward packets using the new valid index. + */ + err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets"); + goto err_out; + } + } + + /* Make sure the flags are set / cleared based on the new nexthop group + * information. + */ + mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp); + + /* At this point 'nh_grp' is just a shell that is not used by anyone + * and its nexthop group info is the old info that was just replaced + * with the new one. Remove it. + */ + nh_grp->can_destroy = true; + mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); + + return 0; + +err_out: + old_nhgi->nh_grp = old_nh_grp; + nh_grp->nhgi = new_nhgi; + new_nhgi->nh_grp = nh_grp; + old_nh_grp->nhgi = old_nhgi; + return err; +} + +static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp, + struct nh_notifier_info *info) +{ + struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp; + struct netlink_ext_ack *extack = info->extack; + int err; + + nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + + old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id); + if (!old_nh_grp) + err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp); + else + err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp, + old_nh_grp, extack); + + if (err) { + nh_grp->can_destroy = true; + mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); + } + + return err; +} + +static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp, + struct nh_notifier_info *info) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id); + if (!nh_grp) + return; + + nh_grp->can_destroy = true; + mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); + + /* If the group still has routes using it, then defer the delete + * operation until the last route using it is deleted. 
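mlxsw_sp_nexthop_obj_group_replace above is a make-before-break swap: the new group info is built on a throwaway group object, the info pointers are exchanged so routes keep referencing the long-lived group, the device is updated, and on failure the swap is reverted wholesale. The pointer choreography in miniature, with the types cut down to the two back-pointers and a stub device update (the real code picks between an index replace and trap updates based on adjacency-index validity):

struct group;

struct group_info {
        struct group *grp;
};

struct group {
        struct group_info *info;
};

static int device_update(struct group *grp) { (void)grp; return 0; }

static int group_replace(struct group *old_grp, struct group *new_grp)
{
        struct group_info *old_info = old_grp->info;
        struct group_info *new_info = new_grp->info;
        int err;

        /* Swap: routes keep pointing at old_grp, now backed by new_info. */
        old_grp->info = new_info;
        new_info->grp = old_grp;
        new_grp->info = old_info;
        old_info->grp = new_grp;

        err = device_update(old_grp);
        if (err)
                goto err_out;
        return 0;       /* caller destroys new_grp, which now holds old_info */

err_out:
        old_grp->info = old_info;
        old_info->grp = old_grp;
        new_grp->info = new_info;
        new_info->grp = new_grp;
        return err;
}

int main(void)
{
        struct group_info old_info = { 0 }, new_info = { 0 };
        struct group old_grp = { &old_info }, new_grp = { &new_info };

        old_info.grp = &old_grp;
        new_info.grp = &new_grp;
        return group_replace(&old_grp, &new_grp);
}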
+ */ + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); +} + +static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct nh_notifier_info *info = ptr; + struct mlxsw_sp_router *router; + int err = 0; + + router = container_of(nb, struct mlxsw_sp_router, nexthop_nb); + err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info); + if (err) + goto out; + + mutex_lock(&router->lock); + + ASSERT_RTNL(); + + switch (event) { + case NEXTHOP_EVENT_REPLACE: + err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info); + break; + case NEXTHOP_EVENT_DEL: + mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info); + break; + default: + break; + } + + mutex_unlock(&router->lock); + +out: + return notifier_from_errno(err); +} + static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { @@ -4004,46 +4699,102 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); } +static int +mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi); + struct mlxsw_sp_nexthop_group_info *nhgi; + struct mlxsw_sp_nexthop *nh; + int err, i; + + nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL); + if (!nhgi) + return -ENOMEM; + nh_grp->nhgi = nhgi; + nhgi->nh_grp = nh_grp; + nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi); + nhgi->count = nhs; + for (i = 0; i < nhgi->count; i++) { + struct fib_nh *fib_nh; + + nh = &nhgi->nexthops[i]; + fib_nh = fib_info_nh(nh_grp->ipv4.fi, i); + err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); + if (err) + goto err_nexthop4_init; + } + err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + if (err) + goto err_group_refresh; + + return 0; + +err_group_refresh: + i = nhgi->count; +err_nexthop4_init: + for (i--; i >= 0; i--) { + nh = &nhgi->nexthops[i]; + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); + } + kfree(nhgi); + return err; +} + +static void +mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + int i; + + for (i = nhgi->count - 1; i >= 0; i--) { + struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; + + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON_ONCE(nhgi->adj_index_valid); + kfree(nhgi); +} + static struct mlxsw_sp_nexthop_group * mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { - unsigned int nhs = fib_info_num_path(fi); struct mlxsw_sp_nexthop_group *nh_grp; - struct mlxsw_sp_nexthop *nh; - struct fib_nh *fib_nh; - int i; int err; - nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL); + nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); - nh_grp->priv = fi; + INIT_LIST_HEAD(&nh_grp->vr_list); + err = rhashtable_init(&nh_grp->vr_ht, + &mlxsw_sp_nexthop_group_vr_ht_params); + if (err) + goto err_nexthop_group_vr_ht_init; INIT_LIST_HEAD(&nh_grp->fib_list); - nh_grp->neigh_tbl = &arp_tbl; - - nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi); - nh_grp->count = nhs; + nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4; + nh_grp->ipv4.fi = fi; fib_info_hold(fi); - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; - fib_nh = fib_info_nh(fi, i); - err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, 
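The notifier at the end of this hunk follows the usual kernel shape: validate first so nothing is touched on failure, serialize on the router lock, dispatch on the event, and hand the errno back through the notifier encoding. A compilable skeleton of that flow; notifier_from_errno() is modelled trivially here, while the real helper lives in linux/notifier.h:

#include <stdio.h>

enum { NH_EVENT_REPLACE, NH_EVENT_DEL };

/* Trivial stand-in for the kernel's notifier_from_errno(). */
static int notifier_from_errno(int err)
{
        return err ? (0x8000 | -err) : 0;
}

static int validate(unsigned long event) { (void)event; return 0; }
static int do_replace(void) { return 0; }
static void do_del(void) { }

static int nexthop_event(unsigned long event)
{
        int err;

        /* Validate before taking any lock or touching any state. */
        err = validate(event);
        if (err)
                return notifier_from_errno(err);

        /* router->lock would be taken here */
        switch (event) {
        case NH_EVENT_REPLACE:
                err = do_replace();     /* add, or make-before-break replace */
                break;
        case NH_EVENT_DEL:
                do_del();               /* void: deletion cannot fail */
                err = 0;
                break;
        default:
                err = 0;
                break;
        }
        /* router->lock released here */

        return notifier_from_errno(err);
}

int main(void)
{
        printf("notifier returned %d\n", nexthop_event(NH_EVENT_REPLACE));
        return 0;
}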
nh, fib_nh); - if (err) - goto err_nexthop4_init; - } + + err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp); + if (err) + goto err_nexthop_group_info_init; + err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); if (err) goto err_nexthop_group_insert; - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + + nh_grp->can_destroy = true; + return nh_grp; err_nexthop_group_insert: -err_nexthop4_init: - for (i--; i >= 0; i--) { - nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); - } + mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp); +err_nexthop_group_info_init: fib_info_put(fi); + rhashtable_destroy(&nh_grp->vr_ht); +err_nexthop_group_vr_ht_init: kfree(nh_grp); return ERR_PTR(err); } @@ -4052,17 +4803,13 @@ static void mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { - struct mlxsw_sp_nexthop *nh; - int i; - + if (!nh_grp->can_destroy) + return; mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); - for (i = 0; i < nh_grp->count; i++) { - nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); - } - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); - WARN_ON_ONCE(nh_grp->adj_index_valid); - fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp)); + mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp); + fib_info_put(nh_grp->ipv4.fi); + WARN_ON_ONCE(!list_empty(&nh_grp->vr_list)); + rhashtable_destroy(&nh_grp->vr_ht); kfree(nh_grp); } @@ -4072,12 +4819,21 @@ static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group *nh_grp; + if (fi->nh) { + nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, + fi->nh->id); + if (WARN_ON_ONCE(!nh_grp)) + return -EINVAL; + goto out; + } + nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi); if (!nh_grp) { nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) return PTR_ERR(nh_grp); } +out: list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list); fib_entry->nh_group = nh_grp; return 0; @@ -4091,6 +4847,12 @@ static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp, list_del(&fib_entry->nexthop_group_node); if (!list_empty(&nh_grp->fib_list)) return; + + if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) { + mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); + return; + } + mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp); } @@ -4120,9 +4882,9 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: - return !!nh_group->adj_index_valid; + return !!nh_group->nhgi->adj_index_valid; case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return !!nh_group->nh_rif; + return !!nh_group->nhgi->nh_rif; case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: @@ -4138,8 +4900,8 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, { int i; - for (i = 0; i < nh_grp->count; i++) { - struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + for (i = 0; i < nh_grp->nhgi->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i]; struct fib6_info *rt = mlxsw_sp_rt6->rt; if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev && @@ -4156,7 +4918,6 @@ static void mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { - struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group); u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr; int dst_len = fib_entry->fib_node->key.prefix_len; struct mlxsw_sp_fib4_entry 
*fib4_entry; @@ -4166,7 +4927,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp, should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry); fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, common); - fri.fi = fi; + fri.fi = fib4_entry->fi; fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; @@ -4181,7 +4942,6 @@ static void mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { - struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group); u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr; int dst_len = fib_entry->fib_node->key.prefix_len; struct mlxsw_sp_fib4_entry *fib4_entry; @@ -4189,7 +4949,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, common); - fri.fi = fi; + fri.fi = fib4_entry->fi; fri.tb_id = fib4_entry->tb_id; fri.dst = cpu_to_be32(*p_dst); fri.dst_len = dst_len; @@ -4264,13 +5024,14 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { switch (op) { - case MLXSW_REG_RALUE_OP_WRITE_WRITE: + case MLXSW_SP_FIB_ENTRY_OP_WRITE: + case MLXSW_SP_FIB_ENTRY_OP_UPDATE: mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry); break; - case MLXSW_REG_RALUE_OP_WRITE_DELETE: + case MLXSW_SP_FIB_ENTRY_OP_DELETE: mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry); break; default: @@ -4278,33 +5039,133 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp, } } +struct mlxsw_sp_fib_entry_op_ctx_basic { + char ralue_pl[MLXSW_REG_RALUE_LEN]; +}; + static void -mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, - const struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_sp_l3proto proto, + enum mlxsw_sp_fib_entry_op op, + u16 virtual_router, u8 prefix_len, + unsigned char *addr, + struct mlxsw_sp_fib_entry_priv *priv) { - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; - enum mlxsw_reg_ralxx_protocol proto; - u32 *p_dip; + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + enum mlxsw_reg_ralxx_protocol ralxx_proto; + char *ralue_pl = op_ctx_basic->ralue_pl; + enum mlxsw_reg_ralue_op ralue_op; - proto = (enum mlxsw_reg_ralxx_protocol) fib->proto; + ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto; - switch (fib->proto) { + switch (op) { + case MLXSW_SP_FIB_ENTRY_OP_WRITE: + case MLXSW_SP_FIB_ENTRY_OP_UPDATE: + ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE; + break; + case MLXSW_SP_FIB_ENTRY_OP_DELETE: + ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE; + break; + default: + WARN_ON_ONCE(1); + return; + } + + switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: - p_dip = (u32 *) fib_entry->fib_node->key.addr; - mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id, - fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op, + virtual_router, prefix_len, (u32 *) addr); break; case MLXSW_SP_L3_PROTO_IPV6: - mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id, - fib_entry->fib_node->key.prefix_len, - fib_entry->fib_node->key.addr); + mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op, + virtual_router, prefix_len, addr); break; } } -static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index) +static 
void +mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size) +{ + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + + mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action, + trap_id, adjacency_index, ecmp_size); +} + +static void +mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif) +{ + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + + mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action, + trap_id, local_erif); +} + +static void +mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx) +{ + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + + mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl); +} + +static void +mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + u32 tunnel_ptr) +{ + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + + mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr); +} + +static int +mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + bool *postponed_for_bulk) +{ + struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv; + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), + op_ctx_basic->ralue_pl); +} + +static bool +mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv) +{ + return true; +} + +static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_sp_fib_entry_op op) +{ + struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; + + mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv); + fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + fib_entry->fib_node->key.addr, + fib_entry->priv); +} + +static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + const struct mlxsw_sp_router_ll_ops *ll_ops) +{ + bool postponed_for_bulk = false; + int err; + + err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk); + if (!postponed_for_bulk) + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); + return err; +} + +static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp) { enum mlxsw_reg_ratr_trap_action trap_action; char ratr_pl[MLXSW_REG_RATR_LEN]; @@ -4318,11 +5179,13 @@ static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index) if (err) return err; - trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS; + trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, MLXSW_REG_RATR_TYPE_ETHERNET, - mlxsw_sp->router->adj_discard_index, rif_index); + mlxsw_sp->router->adj_discard_index, + mlxsw_sp->router->lb_rif_index); mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); if (err) goto err_ratr_write; @@ -4338,11 +5201,13 @@ err_ratr_write: } static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, + struct 
mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; - char ralue_pl[MLXSW_REG_RALUE_LEN]; + struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -4355,12 +5220,10 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, */ if (mlxsw_sp_fib_entry_should_offload(fib_entry)) { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; - adjacency_index = fib_entry->nh_group->adj_index; - ecmp_size = fib_entry->nh_group->ecmp_size; - } else if (!nh_group->adj_index_valid && nh_group->count && - nh_group->nh_rif) { - err = mlxsw_sp_adj_discard_write(mlxsw_sp, - nh_group->nh_rif->rif_index); + adjacency_index = nhgi->adj_index; + ecmp_size = nhgi->ecmp_size; + } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) { + err = mlxsw_sp_adj_discard_write(mlxsw_sp); if (err) return err; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; @@ -4371,19 +5234,20 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, - adjacency_index, ecmp_size); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id, + adjacency_index, ecmp_size); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { - struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; + struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif; enum mlxsw_reg_ralue_trap_action trap_action; - char ralue_pl[MLXSW_REG_RALUE_LEN]; u16 trap_id = 0; u16 rif_index = 0; @@ -4395,111 +5259,124 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, - rif_index); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { - char ralue_pl[MLXSW_REG_RALUE_LEN]; + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_ip2me_pack(op_ctx); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp, + struct 
mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; enum mlxsw_reg_ralue_trap_action trap_action; - char ralue_pl[MLXSW_REG_RALUE_LEN]; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR; - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; enum mlxsw_reg_ralue_trap_action trap_action; - char ralue_pl[MLXSW_REG_RALUE_LEN]; u16 trap_id; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; trap_id = MLXSW_TRAP_ID_RTR_INGRESS1; - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry; const struct mlxsw_sp_ipip_ops *ipip_ops; + int err; if (WARN_ON(!ipip_entry)) return -EINVAL; ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; - return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op, - fib_entry->decap.tunnel_index); + err = ipip_ops->decap_config(mlxsw_sp, ipip_entry, + fib_entry->decap.tunnel_index); + if (err) + return err; + + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, + fib_entry->decap.tunnel_index); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { - char ralue_pl[MLXSW_REG_RALUE_LEN]; + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; - mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); - mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, - fib_entry->decap.tunnel_index); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); + mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op); + ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, + fib_entry->decap.tunnel_index); + return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops); } static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: - return 
mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: - return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: - return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE: - return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry, - op); + return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: - return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, - fib_entry, op); + return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: - return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op); } return -EINVAL; } static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) + enum mlxsw_sp_fib_entry_op op) { - int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); + int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op); if (err) return err; @@ -4509,18 +5386,35 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, return err; } +static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_entry *fib_entry, + bool is_new) +{ + return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, + is_new ? 
MLXSW_SP_FIB_ENTRY_OP_WRITE : + MLXSW_SP_FIB_ENTRY_OP_UPDATE); +} + static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { - return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, - MLXSW_REG_RALUE_OP_WRITE_WRITE); + struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx; + + mlxsw_sp_fib_entry_op_ctx_clear(op_ctx); + return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false); } static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry) { - return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, - MLXSW_REG_RALUE_OP_WRITE_DELETE); + const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops; + + if (!ll_ops->fib_entry_is_committed(fib_entry->priv)) + return 0; + return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, + MLXSW_SP_FIB_ENTRY_OP_DELETE); } static int @@ -4528,17 +5422,17 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, struct mlxsw_sp_fib_entry *fib_entry) { - struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; + struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) }; struct mlxsw_sp_router *router = mlxsw_sp->router; u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id); + int ifindex = nhgi->nexthops[0].ifindex; struct mlxsw_sp_ipip_entry *ipip_entry; - struct fib_info *fi = fen_info->fi; switch (fen_info->type) { case RTN_LOCAL: - ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev, - MLXSW_SP_L3_PROTO_IPV4, dip); + ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex, + MLXSW_SP_L3_PROTO_IPV4, dip); if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, @@ -4571,7 +5465,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; return 0; case RTN_UNICAST: - if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi)) + if (nhgi->gateway) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; @@ -4608,15 +5502,27 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(-ENOMEM); fib_entry = &fib4_entry->common; - err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); - if (err) - goto err_fib4_entry_type_set; + fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops); + if (IS_ERR(fib_entry->priv)) { + err = PTR_ERR(fib_entry->priv); + goto err_fib_entry_priv_create; + } err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi); if (err) goto err_nexthop4_group_get; - fib4_entry->prio = fen_info->fi->fib_priority; + err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group, + fib_node->fib); + if (err) + goto err_nexthop_group_vr_link; + + err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); + if (err) + goto err_fib4_entry_type_set; + + fib4_entry->fi = fen_info->fi; + fib_info_hold(fib4_entry->fi); fib4_entry->tb_id = fen_info->tb_id; fib4_entry->type = fen_info->type; fib4_entry->tos = fen_info->tos; @@ -4625,9 +5531,13 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, return fib4_entry; -err_nexthop4_group_get: - mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry); err_fib4_entry_type_set: + mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib); 
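The error path above follows the usual kernel unwind convention: each err_* label undoes only the steps that succeeded before the failing one, in reverse order of setup, so the VR unlink runs only once the VR link exists. A minimal, self-contained sketch of the same goto-unwind shape, using hypothetical acquire/release helpers rather than the driver's functions:

#include <stdio.h>

/* Hypothetical stand-ins for two setup steps such as the nexthop
 * group get and the VR link; not driver code.
 */
static int acquire_group(void)   { return 0; }
static void release_group(void)  { }
static int acquire_vr_link(void) { return -1; /* simulate a failure */ }

static int example_create(void)
{
	int err;

	err = acquire_group();
	if (err)
		goto err_group_get;
	err = acquire_vr_link();
	if (err)
		goto err_vr_link;
	return 0;

err_vr_link:	/* the VR link failed: undo the group get */
	release_group();
err_group_get:
	return err;
}

int main(void)
{
	/* Prints -1: the failed second step unwound the first. */
	printf("example_create() = %d\n", example_create());
	return 0;
}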
+err_nexthop_group_vr_link: + mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); +err_nexthop4_group_get: + mlxsw_sp_fib_entry_priv_put(fib_entry->priv); +err_fib_entry_priv_create: kfree(fib4_entry); return ERR_PTR(err); } @@ -4635,8 +5545,14 @@ err_fib4_entry_type_set: static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; + + fib_info_put(fib4_entry->fi); mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group, + fib_node->fib); + mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv); kfree(fib4_entry); } @@ -4665,8 +5581,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, if (fib4_entry->tb_id == fen_info->tb_id && fib4_entry->tos == fen_info->tos && fib4_entry->type == fen_info->type && - mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) == - fen_info->fi) + fib4_entry->fi == fen_info->fi) return fib4_entry; return NULL; @@ -4875,14 +5790,16 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + bool is_new = !fib_node->fib_entry; int err; fib_node->fib_entry = fib_entry; - err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); + err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new); if (err) goto err_fib_entry_update; @@ -4893,14 +5810,25 @@ err_fib_entry_update: return err; } -static void -mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + int err; - mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); + err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry); fib_node->fib_entry = NULL; + return err; +} + +static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx; + + mlxsw_sp_fib_entry_op_ctx_clear(op_ctx); + __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry); } static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry) @@ -4922,6 +5850,7 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry) static int mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, const struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced; @@ -4955,7 +5884,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, } replaced = fib_node->fib_entry; - err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common); + err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib_node_entry_link; @@ -4980,23 +5909,26 @@ err_fib4_entry_create: return err; } -static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, - struct fib_entry_notifier_info *fen_info) +static int 
mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct fib_entry_notifier_info *fen_info) { struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; + int err; if (mlxsw_sp->router->aborted) - return; + return 0; fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); if (!fib4_entry) - return; + return 0; fib_node = fib4_entry->common.fib_node; - mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common); + err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return err; } static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) @@ -5047,7 +5979,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) { struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh; - fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; + if (!mlxsw_sp_rt6->rt->nh) + fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); kfree(mlxsw_sp_rt6); } @@ -5081,51 +6014,6 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret); } -static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - const struct fib6_info *rt) -{ - const struct mlxsw_sp_ipip_ops *ipip_ops; - struct mlxsw_sp_ipip_entry *ipip_entry; - struct net_device *dev = rt->fib6_nh->fib_nh_dev; - struct mlxsw_sp_rif *rif; - int err; - - ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev); - if (ipip_entry) { - ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; - if (ipip_ops->can_offload(mlxsw_sp, dev, - MLXSW_SP_L3_PROTO_IPV6)) { - nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; - mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry); - return 0; - } - } - - nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!rif) - return 0; - mlxsw_sp_nexthop_rif_init(nh, rif); - - err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); - if (err) - goto err_nexthop_neigh_init; - - return 0; - -err_nexthop_neigh_init: - mlxsw_sp_nexthop_rif_fini(nh); - return err; -} - -static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) -{ - mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); -} - static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh, @@ -5133,9 +6021,12 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, { struct net_device *dev = rt->fib6_nh->fib_nh_dev; - nh->nh_grp = nh_grp; + nh->nhgi = nh_grp->nhgi; nh->nh_weight = rt->fib6_nh->fib_nh_weight; memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr)); +#if IS_ENABLED(CONFIG_IPV6) + nh->neigh_tbl = &nd_tbl; +#endif mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); @@ -5144,13 +6035,13 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, return 0; nh->ifindex = dev->ifindex; - return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt); + return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev); } static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { - mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); } @@ -5162,51 
+6053,105 @@ static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); } -static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib6_entry *fib6_entry) +static int +mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_fib6_entry *fib6_entry) { - struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_nexthop_group_info *nhgi; struct mlxsw_sp_rt6 *mlxsw_sp_rt6; struct mlxsw_sp_nexthop *nh; - int i = 0; - int err; + int err, i; - nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6), - GFP_KERNEL); - if (!nh_grp) - return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&nh_grp->fib_list); -#if IS_ENABLED(CONFIG_IPV6) - nh_grp->neigh_tbl = &nd_tbl; -#endif + nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6), + GFP_KERNEL); + if (!nhgi) + return -ENOMEM; + nh_grp->nhgi = nhgi; + nhgi->nh_grp = nh_grp; mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, list); - nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); - nh_grp->count = fib6_entry->nrt6; - for (i = 0; i < nh_grp->count; i++) { + nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); + nhgi->count = fib6_entry->nrt6; + for (i = 0; i < nhgi->count; i++) { struct fib6_info *rt = mlxsw_sp_rt6->rt; - nh = &nh_grp->nexthops[i]; + nh = &nhgi->nexthops[i]; err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); if (err) goto err_nexthop6_init; mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); } + nh_grp->nhgi = nhgi; + err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + if (err) + goto err_group_refresh; + + return 0; + +err_group_refresh: + i = nhgi->count; +err_nexthop6_init: + for (i--; i >= 0; i--) { + nh = &nhgi->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + kfree(nhgi); + return err; +} + +static void +mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; + int i; + + for (i = nhgi->count - 1; i >= 0; i--) { + struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; + + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON_ONCE(nhgi->adj_index_valid); + kfree(nhgi); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + int err; + + nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->vr_list); + err = rhashtable_init(&nh_grp->vr_ht, + &mlxsw_sp_nexthop_group_vr_ht_params); + if (err) + goto err_nexthop_group_vr_ht_init; + INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6; + + err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry); + if (err) + goto err_nexthop_group_info_init; err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); if (err) goto err_nexthop_group_insert; - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + nh_grp->can_destroy = true; + return nh_grp; err_nexthop_group_insert: -err_nexthop6_init: - for (i--; i >= 0; i--) { - nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); - } + mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp); +err_nexthop_group_info_init: + rhashtable_destroy(&nh_grp->vr_ht); +err_nexthop_group_vr_ht_init: 
kfree(nh_grp); return ERR_PTR(err); } @@ -5215,24 +6160,29 @@ static void mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { - struct mlxsw_sp_nexthop *nh; - int i = nh_grp->count; - + if (!nh_grp->can_destroy) + return; mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); - for (i--; i >= 0; i--) { - nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); - } - mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); - WARN_ON(nh_grp->adj_index_valid); + mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp); + WARN_ON_ONCE(!list_empty(&nh_grp->vr_list)); + rhashtable_destroy(&nh_grp->vr_ht); kfree(nh_grp); } static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry) { + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); struct mlxsw_sp_nexthop_group *nh_grp; + if (rt->nh) { + nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, + rt->nh->id); + if (WARN_ON_ONCE(!nh_grp)) + return -EINVAL; + goto out; + } + nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry); if (!nh_grp) { nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); @@ -5240,15 +6190,16 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(nh_grp); } - list_add_tail(&fib6_entry->common.nexthop_group_node, - &nh_grp->fib_list); - fib6_entry->common.nh_group = nh_grp; - /* The route and the nexthop are described by the same struct, so we * need to update the nexthop offload indication for the new route. */ __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry); +out: + list_add_tail(&fib6_entry->common.nexthop_group_node, + &nh_grp->fib_list); + fib6_entry->common.nh_group = nh_grp; + return 0; } @@ -5260,16 +6211,24 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp, list_del(&fib_entry->nexthop_group_node); if (!list_empty(&nh_grp->fib_list)) return; + + if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) { + mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp); + return; + } + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp); } -static int -mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib6_entry *fib6_entry) +static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib6_entry *fib6_entry) { struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group; + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; int err; + mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib); fib6_entry->common.nh_group = NULL; list_del(&fib6_entry->common.nexthop_group_node); @@ -5277,11 +6236,17 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop6_group_get; + err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group, + fib_node->fib); + if (err) + goto err_nexthop_group_vr_link; + /* In case this entry is offloaded, then the adjacency index * currently associated with it in the device's table is that * of the old group. Start using the new one instead.
*/ - err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common); + err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, + &fib6_entry->common, false); if (err) goto err_fib_entry_update; @@ -5291,16 +6256,21 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, return 0; err_fib_entry_update: + mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group, + fib_node->fib); +err_nexthop_group_vr_link: mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); err_nexthop6_group_get: list_add_tail(&fib6_entry->common.nexthop_group_node, &old_nh_grp->fib_list); fib6_entry->common.nh_group = old_nh_grp; + mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib); return err; } static int mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib6_entry *fib6_entry, struct fib6_info **rt_arr, unsigned int nrt6) { @@ -5318,7 +6288,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, fib6_entry->nrt6++; } - err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); if (err) goto err_nexthop6_group_update; @@ -5339,6 +6309,7 @@ err_rt6_create: static void mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib6_entry *fib6_entry, struct fib6_info **rt_arr, unsigned int nrt6) { @@ -5356,26 +6327,20 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); } - mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); } static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, const struct fib6_info *rt) { - /* Packets hitting RTF_REJECT routes need to be discarded by the - * stack. We can rely on their destination device not having a - * RIF (it's the loopback device) and can thus use action type - * local, which will cause them to be trapped with a lower - * priority than packets that need to be locally received. 
- */ if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; else if (rt->fib6_type == RTN_BLACKHOLE) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; else if (rt->fib6_flags & RTF_REJECT) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE; - else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) + else if (fib_entry->nh_group->nhgi->gateway) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; @@ -5409,6 +6374,12 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(-ENOMEM); fib_entry = &fib6_entry->common; + fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops); + if (IS_ERR(fib_entry->priv)) { + err = PTR_ERR(fib_entry->priv); + goto err_fib_entry_priv_create; + } + INIT_LIST_HEAD(&fib6_entry->rt6_list); for (i = 0; i < nrt6; i++) { @@ -5421,16 +6392,23 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, fib6_entry->nrt6++; } - mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); - err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); if (err) goto err_nexthop6_group_get; + err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group, + fib_node->fib); + if (err) + goto err_nexthop_group_vr_link; + + mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + fib_entry->fib_node = fib_node; return fib6_entry; +err_nexthop_group_vr_link: + mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry); err_nexthop6_group_get: i = nrt6; err_rt6_create: @@ -5441,6 +6419,8 @@ err_rt6_create: list_del(&mlxsw_sp_rt6->list); mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); } + mlxsw_sp_fib_entry_priv_put(fib_entry->priv); +err_fib_entry_priv_create: kfree(fib6_entry); return ERR_PTR(err); } @@ -5448,9 +6428,14 @@ err_rt6_create: static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry) { + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + + mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group, + fib_node->fib); mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry); WARN_ON(fib6_entry->nrt6); + mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv); kfree(fib6_entry); } @@ -5508,8 +6493,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry) } static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, - struct fib6_info **rt_arr, - unsigned int nrt6) + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct fib6_info **rt_arr, unsigned int nrt6) { struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced; struct mlxsw_sp_fib_entry *replaced; @@ -5548,7 +6533,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, } replaced = fib_node->fib_entry; - err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common); + err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common); if (err) goto err_fib_node_entry_link; @@ -5572,8 +6557,8 @@ err_fib6_entry_create: } static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp, - struct fib6_info **rt_arr, - unsigned int nrt6) + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct fib6_info **rt_arr, unsigned int nrt6) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5604,8 +6589,7 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp, fib6_entry = container_of(fib_node->fib_entry, struct mlxsw_sp_fib6_entry, common); - err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr, - nrt6); + err = 
mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6); if (err) goto err_fib6_entry_nexthop_add; @@ -5616,19 +6600,20 @@ err_fib6_entry_nexthop_add: return err; } -static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, - struct fib6_info **rt_arr, - unsigned int nrt6) +static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct fib6_info **rt_arr, unsigned int nrt6) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; struct fib6_info *rt = rt_arr[0]; + int err; if (mlxsw_sp->router->aborted) - return; + return 0; if (mlxsw_sp_fib6_rt_should_ignore(rt)) - return; + return 0; /* Multipath routes are first added to the FIB trie and only then * notified. If we vetoed the addition, we will get a delete @@ -5637,58 +6622,66 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, */ fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); if (!fib6_entry) - return; + return 0; /* If not all the nexthops are deleted, then only reduce the nexthop * group. */ if (nrt6 != fib6_entry->nrt6) { - mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr, - nrt6); - return; + mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6); + return 0; } fib_node = fib6_entry->common.fib_node; - mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common); + err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common); mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return err; } static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_reg_ralxx_protocol proto, + enum mlxsw_sp_l3proto proto, u8 tree_id) { - char ralta_pl[MLXSW_REG_RALTA_LEN]; - char ralst_pl[MLXSW_REG_RALST_LEN]; + const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto]; + enum mlxsw_reg_ralxx_protocol ralxx_proto = + (enum mlxsw_reg_ralxx_protocol) proto; + struct mlxsw_sp_fib_entry_priv *priv; + char xralta_pl[MLXSW_REG_XRALTA_LEN]; + char xralst_pl[MLXSW_REG_XRALST_LEN]; int i, err; - mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); + mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id); + err = ll_ops->ralta_write(mlxsw_sp, xralta_pl); if (err) return err; - mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); + mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id); + err = ll_ops->ralst_write(mlxsw_sp, xralst_pl); if (err) return err; for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx; struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; - char raltb_pl[MLXSW_REG_RALTB_LEN]; - char ralue_pl[MLXSW_REG_RALUE_LEN]; + char xraltb_pl[MLXSW_REG_XRALTB_LEN]; - mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), - raltb_pl); + mlxsw_sp_fib_entry_op_ctx_clear(op_ctx); + mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id); + err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl); if (err) return err; - mlxsw_reg_ralue_pack(ralue_pl, proto, - MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0); - mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), - ralue_pl); + priv = mlxsw_sp_fib_entry_priv_create(ll_ops); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + 
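The abort-trap loop here uses the new low-level ops in two phases: the default entry of each VR is first packed into the shared operation context, and only the commit call issues the register write. A self-contained sketch of that pack/commit shape, with made-up names standing in for the driver's API:

#include <stdio.h>

/* Illustrative operation context; payload stands in for the packed
 * RALUE/XRALUE register contents.
 */
struct op_ctx {
	char payload[64];
	int packed;
};

static void fib_entry_pack(struct op_ctx *ctx, unsigned int vr_id,
			   unsigned int prefix_len)
{
	/* Build the payload in memory; nothing reaches the device yet. */
	snprintf(ctx->payload, sizeof(ctx->payload), "vr=%u prefix_len=%u",
		 vr_id, prefix_len);
	ctx->packed = 1;
}

static int fib_entry_commit(struct op_ctx *ctx)
{
	if (!ctx->packed)
		return -1;
	/* One "register write" per commit; bulking amounts to packing
	 * several entries before a single commit.
	 */
	puts(ctx->payload);
	ctx->packed = 0;
	return 0;
}

int main(void)
{
	struct op_ctx ctx = { 0 };

	fib_entry_pack(&ctx, 0, 0);	/* default route of one VR */
	return fib_entry_commit(&ctx);
}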
ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE, + vr->id, 0, NULL, priv); + ll_ops->fib_entry_act_ip2me_pack(op_ctx); + err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL); + mlxsw_sp_fib_entry_priv_put(priv); if (err) return err; } @@ -5784,7 +6777,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) { - enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; + enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4; int err; err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, @@ -5796,7 +6789,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) * packets that don't match any routes are trapped to the CPU. */ - proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; + proto = MLXSW_SP_L3_PROTO_IPV6; return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, MLXSW_SP_LPM_TREE_MIN + 1); } @@ -5901,15 +6894,15 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n"); } -struct mlxsw_sp_fib6_event_work { +struct mlxsw_sp_fib6_event { struct fib6_info **rt_arr; unsigned int nrt6; }; -struct mlxsw_sp_fib_event_work { - struct work_struct work; +struct mlxsw_sp_fib_event { + struct list_head list; /* node in fib queue */ union { - struct mlxsw_sp_fib6_event_work fib6_work; + struct mlxsw_sp_fib6_event fib6_event; struct fib_entry_notifier_info fen_info; struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; @@ -5918,11 +6911,12 @@ struct mlxsw_sp_fib_event_work { }; struct mlxsw_sp *mlxsw_sp; unsigned long event; + int family; }; static int -mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work, - struct fib6_entry_notifier_info *fen6_info) +mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event, + struct fib6_entry_notifier_info *fen6_info) { struct fib6_info *rt = fen6_info->rt; struct fib6_info **rt_arr; @@ -5936,8 +6930,8 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work, if (!rt_arr) return -ENOMEM; - fib6_work->rt_arr = rt_arr; - fib6_work->nrt6 = nrt6; + fib6_event->rt_arr = rt_arr; + fib6_event->nrt6 = nrt6; rt_arr[0] = rt; fib6_info_hold(rt); @@ -5959,170 +6953,233 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work, } static void -mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work) +mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event) { int i; - for (i = 0; i < fib6_work->nrt6; i++) - mlxsw_sp_rt6_release(fib6_work->rt_arr[i]); - kfree(fib6_work->rt_arr); + for (i = 0; i < fib6_event->nrt6; i++) + mlxsw_sp_rt6_release(fib6_event->rt_arr[i]); + kfree(fib6_event->rt_arr); } -static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) +static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_event *fib_event) { - struct mlxsw_sp_fib_event_work *fib_work = - container_of(work, struct mlxsw_sp_fib_event_work, work); - struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; int err; - mutex_lock(&mlxsw_sp->router->lock); mlxsw_sp_span_respin(mlxsw_sp); - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: - err = mlxsw_sp_router_fib4_replace(mlxsw_sp, - &fib_work->fen_info); - if (err) + err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info); + if (err) { + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); 
mlxsw_sp_router_fib_abort(mlxsw_sp); - fib_info_put(fib_work->fen_info.fi); + } + fib_info_put(fib_event->fen_info.fi); break; case FIB_EVENT_ENTRY_DEL: - mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); - fib_info_put(fib_work->fen_info.fi); + err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info); + if (err) + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); + fib_info_put(fib_event->fen_info.fi); break; case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, - fib_work->fnh_info.fib_nh); - fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); + mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh); + fib_info_put(fib_event->fnh_info.fib_nh->nh_parent); break; } - mutex_unlock(&mlxsw_sp->router->lock); - kfree(fib_work); } -static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) +static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_event *fib_event) { - struct mlxsw_sp_fib_event_work *fib_work = - container_of(work, struct mlxsw_sp_fib_event_work, work); - struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; int err; - mutex_lock(&mlxsw_sp->router->lock); mlxsw_sp_span_respin(mlxsw_sp); - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: - err = mlxsw_sp_router_fib6_replace(mlxsw_sp, - fib_work->fib6_work.rt_arr, - fib_work->fib6_work.nrt6); - if (err) + err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr, + fib_event->fib6_event.nrt6); + if (err) { + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); mlxsw_sp_router_fib_abort(mlxsw_sp); - mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); + } + mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event); break; case FIB_EVENT_ENTRY_APPEND: - err = mlxsw_sp_router_fib6_append(mlxsw_sp, - fib_work->fib6_work.rt_arr, - fib_work->fib6_work.nrt6); - if (err) + err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr, + fib_event->fib6_event.nrt6); + if (err) { + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); mlxsw_sp_router_fib_abort(mlxsw_sp); - mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); + } + mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event); break; case FIB_EVENT_ENTRY_DEL: - mlxsw_sp_router_fib6_del(mlxsw_sp, - fib_work->fib6_work.rt_arr, - fib_work->fib6_work.nrt6); - mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work); + err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr, + fib_event->fib6_event.nrt6); + if (err) + mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx); + mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event); break; } - mutex_unlock(&mlxsw_sp->router->lock); - kfree(fib_work); } -static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) +static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_event *fib_event) { - struct mlxsw_sp_fib_event_work *fib_work = - container_of(work, struct mlxsw_sp_fib_event_work, work); - struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; bool replace; int err; rtnl_lock(); mutex_lock(&mlxsw_sp->router->lock); - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_ADD: - replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; + replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE; - err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info, - replace); + err = 
mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); - mr_cache_put(fib_work->men_info.mfc); + mr_cache_put(fib_event->men_info.mfc); break; case FIB_EVENT_ENTRY_DEL: - mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); - mr_cache_put(fib_work->men_info.mfc); + mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info); + mr_cache_put(fib_event->men_info.mfc); break; case FIB_EVENT_VIF_ADD: err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, - &fib_work->ven_info); + &fib_event->ven_info); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); - dev_put(fib_work->ven_info.dev); + dev_put(fib_event->ven_info.dev); break; case FIB_EVENT_VIF_DEL: - mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, - &fib_work->ven_info); - dev_put(fib_work->ven_info.dev); + mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info); + dev_put(fib_event->ven_info.dev); break; } mutex_unlock(&mlxsw_sp->router->lock); rtnl_unlock(); - kfree(fib_work); } -static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, +static void mlxsw_sp_router_fib_event_work(struct work_struct *work) +{ + struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work); + struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx; + struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp; + struct mlxsw_sp_fib_event *next_fib_event; + struct mlxsw_sp_fib_event *fib_event; + int last_family = AF_UNSPEC; + LIST_HEAD(fib_event_queue); + + spin_lock_bh(&router->fib_event_queue_lock); + list_splice_init(&router->fib_event_queue, &fib_event_queue); + spin_unlock_bh(&router->fib_event_queue_lock); + + /* Router lock is held here to make sure the per-instance + * operation context is not used in between the processing + * of FIB4/6 events. + */ + mutex_lock(&router->lock); + mlxsw_sp_fib_entry_op_ctx_clear(op_ctx); + list_for_each_entry_safe(fib_event, next_fib_event, + &fib_event_queue, list) { + /* Check if the next entry in the queue exists and it is + * of the same type (family and event) as the current one. + * In that case it is permitted to do the bulking + * of multiple FIB entries into a single register write. + */ + op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) && + fib_event->family == next_fib_event->family && + fib_event->event == next_fib_event->event; + op_ctx->event = fib_event->event; + + /* In case the family of this entry differs from the previous one, + * context reinitialization is going to be needed now; indicate that. + * Note that since last_family is initialized to AF_UNSPEC, this is always + * going to happen for the first entry processed in the work. + */ + if (fib_event->family != last_family) + op_ctx->initialized = false; + + switch (fib_event->family) { + case AF_INET: + mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx, + fib_event); + break; + case AF_INET6: + mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx, + fib_event); + break; + case RTNL_FAMILY_IP6MR: + case RTNL_FAMILY_IPMR: + /* Unlock here as inside FIBMR the lock is taken again + * under RTNL. The per-instance operation context + * is not used by FIBMR.
+ */ + mutex_unlock(&router->lock); + mlxsw_sp_router_fibmr_event_process(mlxsw_sp, + fib_event); + mutex_lock(&router->lock); + break; + default: + WARN_ON_ONCE(1); + } + last_family = fib_event->family; + kfree(fib_event); + cond_resched(); + } + WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list)); + mutex_unlock(&router->lock); +} + +static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event, struct fib_notifier_info *info) { struct fib_entry_notifier_info *fen_info; struct fib_nh_notifier_info *fnh_info; - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); - fib_work->fen_info = *fen_info; + fib_event->fen_info = *fen_info; /* Take reference on fib_info to prevent it from being - * freed while work is queued. Release it afterwards. + * freed while event is queued. Release it afterwards. */ - fib_info_hold(fib_work->fen_info.fi); + fib_info_hold(fib_event->fen_info.fi); break; case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, info); - fib_work->fnh_info = *fnh_info; - fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + fib_event->fnh_info = *fnh_info; + fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent); break; } } -static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, +static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event, struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info; int err; - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_APPEND: case FIB_EVENT_ENTRY_DEL: fen6_info = container_of(info, struct fib6_entry_notifier_info, info); - err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work, - fen6_info); + err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event, + fen6_info); if (err) return err; break; @@ -6132,20 +7189,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, } static void -mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, +mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event, struct fib_notifier_info *info) { - switch (fib_work->event) { + switch (fib_event->event) { case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_ADD: case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); - mr_cache_hold(fib_work->men_info.mfc); + memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info)); + mr_cache_hold(fib_event->men_info.mfc); break; case FIB_EVENT_VIF_ADD: case FIB_EVENT_VIF_DEL: - memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); - dev_hold(fib_work->ven_info.dev); + memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info)); + dev_hold(fib_event->ven_info.dev); break; } } @@ -6202,7 +7259,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, static int mlxsw_sp_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) { - struct mlxsw_sp_fib_event_work *fib_work; + struct mlxsw_sp_fib_event *fib_event; struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; int err; @@ -6234,55 +7291,43 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported"); return notifier_from_errno(-EINVAL); } - if (fen_info->fi->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with 
nexthop objects is not supported"); - return notifier_from_errno(-EINVAL); - } - } else if (info->family == AF_INET6) { - struct fib6_entry_notifier_info *fen6_info; - - fen6_info = container_of(info, - struct fib6_entry_notifier_info, - info); - if (fen6_info->rt->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported"); - return notifier_from_errno(-EINVAL); - } } break; } - fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); - if (!fib_work) + fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC); + if (!fib_event) return NOTIFY_BAD; - fib_work->mlxsw_sp = router->mlxsw_sp; - fib_work->event = event; + fib_event->mlxsw_sp = router->mlxsw_sp; + fib_event->event = event; + fib_event->family = info->family; switch (info->family) { case AF_INET: - INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); - mlxsw_sp_router_fib4_event(fib_work, info); + mlxsw_sp_router_fib4_event(fib_event, info); break; case AF_INET6: - INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); - err = mlxsw_sp_router_fib6_event(fib_work, info); + err = mlxsw_sp_router_fib6_event(fib_event, info); if (err) goto err_fib_event; break; case RTNL_FAMILY_IP6MR: case RTNL_FAMILY_IPMR: - INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); - mlxsw_sp_router_fibmr_event(fib_work, info); + mlxsw_sp_router_fibmr_event(fib_event, info); break; } - mlxsw_core_schedule_work(&fib_work->work); + /* Enqueue the event and trigger the work */ + spin_lock_bh(&router->fib_event_queue_lock); + list_add_tail(&fib_event->list, &router->fib_event_queue); + spin_unlock_bh(&router->fib_event_queue_lock); + mlxsw_core_schedule_work(&router->fib_event_work); return NOTIFY_DONE; err_fib_event: - kfree(fib_work); + kfree(fib_event); return NOTIFY_BAD; } @@ -6671,9 +7716,9 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif) } static int -mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, - struct net_device *l3_dev, - struct netlink_ext_ack *extack) +__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + struct net_device *l3_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -6738,6 +7783,27 @@ __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) mlxsw_sp_rif_subport_put(rif); } +int +mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + struct net_device *l3_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err = 0; + + mutex_lock(&mlxsw_sp->router->lock); + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!rif) + goto out; + + err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev, + extack); +out: + mutex_unlock(&mlxsw_sp->router->lock); + return err; +} + void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { @@ -6762,8 +7828,8 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, switch (event) { case NETDEV_UP: - return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, - l3_dev, extack); + return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, + l3_dev, extack); case NETDEV_DOWN: __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); break; @@ -6831,6 +7897,15 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, switch (event) { case NETDEV_UP: + if 
(netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) { + u16 proto; + + br_vlan_get_proto(l3_dev, &proto); + if (proto == ETH_P_8021AD) { + NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported"); + return -EOPNOTSUPP; + } + } rif = mlxsw_sp_rif_create(mlxsw_sp, ¶ms, extack); if (IS_ERR(rif)) return PTR_ERR(rif); @@ -8057,6 +9132,70 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); } +static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = { + .init = mlxsw_sp_router_ll_basic_init, + .ralta_write = mlxsw_sp_router_ll_basic_ralta_write, + .ralst_write = mlxsw_sp_router_ll_basic_ralst_write, + .raltb_write = mlxsw_sp_router_ll_basic_raltb_write, + .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic), + .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack, + .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack, + .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack, + .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack, + .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack, + .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit, + .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed, +}; + +static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router) +{ + size_t max_size = 0; + int i; + + for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) { + size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size; + + if (size > max_size) + max_size = size; + } + router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size, + GFP_KERNEL); + if (!router->ll_op_ctx) + return -ENOMEM; + INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list); + return 0; +} + +static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router) +{ + WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list)); + kfree(router->ll_op_ctx); +} + +static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp) +{ + u16 lb_rif_index; + int err; + + /* Create a generic loopback RIF associated with the main table + * (default VRF). Any table can be used, but the main table exists + * anyway, so we do not waste resources. + */ + err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, + &lb_rif_index); + if (err) + return err; + + mlxsw_sp->router->lb_rif_index = lb_rif_index; + + return 0; +} + +static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index); +} + int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, struct netlink_ext_ack *extack) { @@ -8070,6 +9209,19 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; + err = mlxsw_sp_router_xm_init(mlxsw_sp); + if (err) + goto err_xm_init; + + router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = mlxsw_sp_router_xm_ipv4_is_supported(mlxsw_sp) ? 
+ &mlxsw_sp_router_ll_xm_ops : + &mlxsw_sp_router_ll_basic_ops; + router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops; + + err = mlxsw_sp_router_ll_op_ctx_init(router); + if (err) + goto err_ll_op_ctx_init; + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -8106,6 +9258,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_vrs_init; + err = mlxsw_sp_lb_rif_init(mlxsw_sp); + if (err) + goto err_lb_rif_init; + err = mlxsw_sp_neigh_init(mlxsw_sp); if (err) goto err_neigh_init; @@ -8118,6 +9274,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_dscp_init; + INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work); + INIT_LIST_HEAD(&router->fib_event_queue); + spin_lock_init(&router->fib_event_queue_lock); + router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; err = register_inetaddr_notifier(&router->inetaddr_nb); if (err) @@ -8134,6 +9294,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_register_netevent_notifier; + mlxsw_sp->router->nexthop_nb.notifier_call = + mlxsw_sp_nexthop_obj_event; + err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->nexthop_nb, + extack); + if (err) + goto err_register_nexthop_notifier; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb, @@ -8144,6 +9312,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, return 0; err_register_fib_notifier: + unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->nexthop_nb); +err_register_nexthop_notifier: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: unregister_inet6addr_notifier(&router->inet6addr_nb); @@ -8151,10 +9322,13 @@ err_register_inet6addr_notifier: unregister_inetaddr_notifier(&router->inetaddr_nb); err_register_inetaddr_notifier: mlxsw_core_flush_owq(); + WARN_ON(!list_empty(&router->fib_event_queue)); err_dscp_init: err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: + mlxsw_sp_lb_rif_fini(mlxsw_sp); +err_lb_rif_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: mlxsw_sp_mr_fini(mlxsw_sp); @@ -8171,6 +9345,10 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_router_ll_op_ctx_fini(router); +err_ll_op_ctx_init: + mlxsw_sp_router_xm_fini(mlxsw_sp); +err_xm_init: mutex_destroy(&mlxsw_sp->router->lock); kfree(mlxsw_sp->router); return err; @@ -8180,11 +9358,15 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &mlxsw_sp->router->fib_nb); + unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp), + &mlxsw_sp->router->nexthop_nb); unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mlxsw_core_flush_owq(); + WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue)); mlxsw_sp_neigh_fini(mlxsw_sp); + mlxsw_sp_lb_rif_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); mlxsw_sp_mr_fini(mlxsw_sp); mlxsw_sp_lpm_fini(mlxsw_sp); @@ -8193,6 +9375,8 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router); + mlxsw_sp_router_xm_fini(mlxsw_sp); mutex_destroy(&mlxsw_sp->router->lock); 
kfree(mlxsw_sp->router); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 8418dc3ae967..2875ee8ec537 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -15,6 +15,27 @@ struct mlxsw_sp_router_nve_decap { u8 valid:1; }; +struct mlxsw_sp_fib_entry_op_ctx { + u8 bulk_ok:1, /* Indicate to the low-level op it is ok to bulk + * the actual entry with the one that is the next + * in queue. + */ + initialized:1; /* Bit that the low-level op sets in case + * the context priv is initialized. + */ + struct list_head fib_entry_priv_list; + unsigned long event; + unsigned long ll_priv[]; +}; + +static inline void +mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx) +{ + WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list)); + memset(op_ctx, 0, sizeof(*op_ctx)); + INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list); +} + struct mlxsw_sp_router { struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_rif **rifs; @@ -38,6 +59,7 @@ struct mlxsw_sp_router { struct list_head nexthop_neighs_list; struct list_head ipip_list; bool aborted; + struct notifier_block nexthop_nb; struct notifier_block fib_nb; struct notifier_block netevent_nb; struct notifier_block inetaddr_nb; @@ -48,6 +70,56 @@ struct mlxsw_sp_router { bool adj_discard_index_valid; struct mlxsw_sp_router_nve_decap nve_decap_config; struct mutex lock; /* Protects shared router resources */ + struct work_struct fib_event_work; + struct list_head fib_event_queue; + spinlock_t fib_event_queue_lock; /* Protects fib event queue list */ + /* One set of ops for each protocol: IPv4 and IPv6 */ + const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX]; + struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx; + u16 lb_rif_index; + struct mlxsw_sp_router_xm *xm; +}; + +struct mlxsw_sp_fib_entry_priv { + refcount_t refcnt; + struct list_head list; /* Member in op_ctx->fib_entry_priv_list */ + unsigned long priv[]; +}; + +enum mlxsw_sp_fib_entry_op { + MLXSW_SP_FIB_ENTRY_OP_WRITE, + MLXSW_SP_FIB_ENTRY_OP_UPDATE, + MLXSW_SP_FIB_ENTRY_OP_DELETE, +}; + +/* Low-level router ops. Basically this is to handle the different + * register sets to work with ordinary and XM trees and FIB entries. 
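One detail worth calling out in the context structure above: the trailing ll_priv[] flexible array is what lets a single allocation serve whichever low-level backend (basic or XM) is active, with each backend declaring its required size through fib_entry_op_ctx_size and overlaying its own struct on the array. A sketch of the idiom with invented op_ctx/backend_ops names, assuming the same allocate-to-the-maximum strategy the driver uses:

#include <linux/kernel.h>
#include <linux/slab.h>

struct op_ctx {
	bool initialized;
	unsigned long ll_priv[];	/* backend-private, variably sized */
};

struct backend_ops {
	size_t priv_size;		/* how much of ll_priv is needed */
};

static struct op_ctx *op_ctx_alloc(const struct backend_ops **ops,
				   int count)
{
	size_t max_size = 0;
	int i;

	/* Size the single shared context for the largest backend. */
	for (i = 0; i < count; i++)
		max_size = max(max_size, ops[i]->priv_size);
	return kzalloc(sizeof(struct op_ctx) + max_size, GFP_KERNEL);
}

/* A backend then overlays its own state on the flexible array: */
struct backend_priv {
	char pl[32];			/* stand-in for a register payload */
	unsigned int offset;
};

static void backend_use(struct op_ctx *ctx)
{
	struct backend_priv *priv = (void *) ctx->ll_priv;

	priv->offset = 0;		/* stand-in for real packing work */
}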
+ */ +struct mlxsw_sp_router_ll_ops { + int (*init)(struct mlxsw_sp *mlxsw_sp, u16 vr_id, + enum mlxsw_sp_l3proto proto); + int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl); + int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl); + int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl); + size_t fib_entry_op_ctx_size; + size_t fib_entry_priv_size; + void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op, + u16 virtual_router, u8 prefix_len, unsigned char *addr, + struct mlxsw_sp_fib_entry_priv *priv); + void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size); + void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif); + void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx); + void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + u32 tunnel_ptr); + int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + bool *postponed_for_bulk); + bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv); }; struct mlxsw_sp_rif_ipip_lb; @@ -129,6 +201,7 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, u32 *p_adj_size, u32 *p_adj_hash_index); struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); +bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh); #define mlxsw_sp_nexthop_for_each(nh, router) \ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ nh = mlxsw_sp_nexthop_next(router, nh)) @@ -150,4 +223,10 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp); +extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops; + +int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp); +bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp); + #endif /* _MLXSW_ROUTER_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c new file mode 100644 index 000000000000..d213af723a2a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c @@ -0,0 +1,812 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/rhashtable.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" +#include "spectrum_router.h" + +#define MLXSW_SP_ROUTER_XM_M_VAL 16 + +static const u8 mlxsw_sp_router_xm_m_val[] = { + [MLXSW_SP_L3_PROTO_IPV4] = MLXSW_SP_ROUTER_XM_M_VAL, + [MLXSW_SP_L3_PROTO_IPV6] = 0, /* Currently unused. 
*/ +}; + +#define MLXSW_SP_ROUTER_XM_L_VAL_MAX 16 + +struct mlxsw_sp_router_xm { + bool ipv4_supported; + bool ipv6_supported; + unsigned int entries_size; + struct rhashtable ltable_ht; + struct rhashtable flush_ht; /* Stores items about to be flushed from cache */ + unsigned int flush_count; + bool flush_all_mode; +}; + +struct mlxsw_sp_router_xm_ltable_node { + struct rhash_head ht_node; /* Member of router_xm->ltable_ht */ + u16 mindex; + u8 current_lvalue; + refcount_t refcnt; + unsigned int lvalue_ref[MLXSW_SP_ROUTER_XM_L_VAL_MAX + 1]; +}; + +static const struct rhashtable_params mlxsw_sp_router_xm_ltable_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, mindex), + .head_offset = offsetof(struct mlxsw_sp_router_xm_ltable_node, ht_node), + .key_len = sizeof(u16), + .automatic_shrinking = true, +}; + +struct mlxsw_sp_router_xm_flush_info { + bool all; + enum mlxsw_sp_l3proto proto; + u16 virtual_router; + u8 prefix_len; + unsigned char addr[sizeof(struct in6_addr)]; +}; + +struct mlxsw_sp_router_xm_fib_entry { + bool committed; + struct mlxsw_sp_router_xm_ltable_node *ltable_node; /* Parent node */ + u16 mindex; /* Store for processing from commit op */ + u8 lvalue; + struct mlxsw_sp_router_xm_flush_info flush_info; +}; + +#define MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX \ + (MLXSW_REG_XMDR_TRANS_LEN / MLXSW_REG_XMDR_C_LT_ROUTE_V4_LEN) + +struct mlxsw_sp_fib_entry_op_ctx_xm { + bool initialized; + char xmdr_pl[MLXSW_REG_XMDR_LEN]; + unsigned int trans_offset; /* Offset of the current command within one + * transaction of XMDR register. + */ + unsigned int trans_item_len; /* The current command length. This is used + * to advance 'trans_offset' when the next + * command is appended. + */ + unsigned int entries_count; + struct mlxsw_sp_router_xm_fib_entry *entries[MLXSW_SP_ROUTE_LL_XM_ENTRIES_MAX]; +}; + +static int mlxsw_sp_router_ll_xm_init(struct mlxsw_sp *mlxsw_sp, u16 vr_id, + enum mlxsw_sp_l3proto proto) +{ + char rxlte_pl[MLXSW_REG_RXLTE_LEN]; + + mlxsw_reg_rxlte_pack(rxlte_pl, vr_id, + (enum mlxsw_reg_rxlte_protocol) proto, true); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxlte), rxlte_pl); +} + +static int mlxsw_sp_router_ll_xm_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralta), xralta_pl); +} + +static int mlxsw_sp_router_ll_xm_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xralst), xralst_pl); +} + +static int mlxsw_sp_router_ll_xm_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl) +{ + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xraltb), xraltb_pl); +} + +static u16 mlxsw_sp_router_ll_xm_mindex_get4(const u32 addr) +{ + /* Currently the M-index is set to linear mode. That means it is defined + * as 16 MSB of IP address. 
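To make the linear M-index arithmetic concrete before the code below: with M = 16, a route's M-index is simply the top 16 bits of its IPv4 address, and its L-value is however many prefix bits extend past that boundary. A worked sketch with hypothetical helper names (the driver's own versions follow):

#include <linux/types.h>

#define XM_M_VAL	16	/* mirrors MLXSW_SP_ROUTER_XM_M_VAL */

static u16 xm_mindex_get4(u32 addr)
{
	return addr >> 16;	/* linear mode: 16 MSB of the address */
}

static u8 xm_lvalue(u8 prefix_len)
{
	return prefix_len > XM_M_VAL ? prefix_len - XM_M_VAL : 0;
}

/* For 198.51.100.0/24 (0xc6336400): mindex = 0xc633, lvalue = 24 - 16 = 8.
 * For 10.0.0.0/8, the whole prefix fits above the M boundary: lvalue = 0.
 */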
+ */ + return addr >> MLXSW_SP_ROUTER_XM_L_VAL_MAX; +} + +static u16 mlxsw_sp_router_ll_xm_mindex_get6(const unsigned char *addr) +{ + WARN_ON_ONCE(1); + return 0; /* currently unused */ +} + +static void mlxsw_sp_router_ll_xm_op_ctx_check_init(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm) +{ + if (op_ctx->initialized) + return; + op_ctx->initialized = true; + + mlxsw_reg_xmdr_pack(op_ctx_xm->xmdr_pl, true); + op_ctx_xm->trans_offset = 0; + op_ctx_xm->entries_count = 0; +} + +static void mlxsw_sp_router_ll_xm_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_sp_l3proto proto, + enum mlxsw_sp_fib_entry_op op, + u16 virtual_router, u8 prefix_len, + unsigned char *addr, + struct mlxsw_sp_fib_entry_priv *priv) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv; + struct mlxsw_sp_router_xm_flush_info *flush_info; + enum mlxsw_reg_xmdr_c_ltr_op xmdr_c_ltr_op; + unsigned int len; + + mlxsw_sp_router_ll_xm_op_ctx_check_init(op_ctx, op_ctx_xm); + + switch (op) { + case MLXSW_SP_FIB_ENTRY_OP_WRITE: + xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_WRITE; + break; + case MLXSW_SP_FIB_ENTRY_OP_UPDATE: + xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_UPDATE; + break; + case MLXSW_SP_FIB_ENTRY_OP_DELETE: + xmdr_c_ltr_op = MLXSW_REG_XMDR_C_LTR_OP_DELETE; + break; + default: + WARN_ON_ONCE(1); + return; + } + + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + len = mlxsw_reg_xmdr_c_ltr_pack4(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset, + op_ctx_xm->entries_count, xmdr_c_ltr_op, + virtual_router, prefix_len, (u32 *) addr); + fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get4(*((u32 *) addr)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + len = mlxsw_reg_xmdr_c_ltr_pack6(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset, + op_ctx_xm->entries_count, xmdr_c_ltr_op, + virtual_router, prefix_len, addr); + fib_entry->mindex = mlxsw_sp_router_ll_xm_mindex_get6(addr); + break; + default: + WARN_ON_ONCE(1); + return; + } + if (!op_ctx_xm->trans_offset) + op_ctx_xm->trans_item_len = len; + else + WARN_ON_ONCE(op_ctx_xm->trans_item_len != len); + + op_ctx_xm->entries[op_ctx_xm->entries_count] = fib_entry; + + fib_entry->lvalue = prefix_len > mlxsw_sp_router_xm_m_val[proto] ? 
+ prefix_len - mlxsw_sp_router_xm_m_val[proto] : 0; + + flush_info = &fib_entry->flush_info; + flush_info->proto = proto; + flush_info->virtual_router = virtual_router; + flush_info->prefix_len = prefix_len; + if (addr) + memcpy(flush_info->addr, addr, sizeof(flush_info->addr)); + else + memset(flush_info->addr, 0, sizeof(flush_info->addr)); +} + +static void +mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + + mlxsw_reg_xmdr_c_ltr_act_remote_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset, + trap_action, trap_id, adjacency_index, ecmp_size); +} + +static void +mlxsw_sp_router_ll_xm_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + + mlxsw_reg_xmdr_c_ltr_act_local_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset, + trap_action, trap_id, local_erif); +} + +static void +mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + + mlxsw_reg_xmdr_c_ltr_act_ip2me_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset); +} + +static void +mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + u32 tunnel_ptr) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + + mlxsw_reg_xmdr_c_ltr_act_ip2me_tun_pack(op_ctx_xm->xmdr_pl, op_ctx_xm->trans_offset, + tunnel_ptr); +} + +static struct mlxsw_sp_router_xm_ltable_node * +mlxsw_sp_router_xm_ltable_node_get(struct mlxsw_sp_router_xm *router_xm, u16 mindex) +{ + struct mlxsw_sp_router_xm_ltable_node *ltable_node; + int err; + + ltable_node = rhashtable_lookup_fast(&router_xm->ltable_ht, &mindex, + mlxsw_sp_router_xm_ltable_ht_params); + if (ltable_node) { + refcount_inc(<able_node->refcnt); + return ltable_node; + } + ltable_node = kzalloc(sizeof(*ltable_node), GFP_KERNEL); + if (!ltable_node) + return ERR_PTR(-ENOMEM); + ltable_node->mindex = mindex; + refcount_set(<able_node->refcnt, 1); + + err = rhashtable_insert_fast(&router_xm->ltable_ht, <able_node->ht_node, + mlxsw_sp_router_xm_ltable_ht_params); + if (err) + goto err_insert; + + return ltable_node; + +err_insert: + kfree(ltable_node); + return ERR_PTR(err); +} + +static void mlxsw_sp_router_xm_ltable_node_put(struct mlxsw_sp_router_xm *router_xm, + struct mlxsw_sp_router_xm_ltable_node *ltable_node) +{ + if (!refcount_dec_and_test(<able_node->refcnt)) + return; + rhashtable_remove_fast(&router_xm->ltable_ht, <able_node->ht_node, + mlxsw_sp_router_xm_ltable_ht_params); + kfree(ltable_node); +} + +static int mlxsw_sp_router_xm_ltable_lvalue_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_ltable_node *ltable_node) +{ + char xrmt_pl[MLXSW_REG_XRMT_LEN]; + + mlxsw_reg_xrmt_pack(xrmt_pl, ltable_node->mindex, ltable_node->current_lvalue); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xrmt), xrmt_pl); +} + +struct mlxsw_sp_router_xm_flush_node { + struct rhash_head ht_node; /* Member of router_xm->flush_ht */ + struct list_head list; + struct mlxsw_sp_router_xm_flush_info flush_info; + struct delayed_work dw; + struct mlxsw_sp *mlxsw_sp; + unsigned long start_jiffies; + unsigned int reuses; /* By how many flush calls this was 
reused. */ + refcount_t refcnt; +}; + +static const struct rhashtable_params mlxsw_sp_router_xm_flush_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, flush_info), + .head_offset = offsetof(struct mlxsw_sp_router_xm_flush_node, ht_node), + .key_len = sizeof(struct mlxsw_sp_router_xm_flush_info), + .automatic_shrinking = true, +}; + +static struct mlxsw_sp_router_xm_flush_node * +mlxsw_sp_router_xm_cache_flush_node_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_flush_info *flush_info) +{ + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + struct mlxsw_sp_router_xm_flush_node *flush_node; + int err; + + flush_node = kzalloc(sizeof(*flush_node), GFP_KERNEL); + if (!flush_node) + return ERR_PTR(-ENOMEM); + + flush_node->flush_info = *flush_info; + err = rhashtable_insert_fast(&router_xm->flush_ht, &flush_node->ht_node, + mlxsw_sp_router_xm_flush_ht_params); + if (err) { + kfree(flush_node); + return ERR_PTR(err); + } + router_xm->flush_count++; + flush_node->mlxsw_sp = mlxsw_sp; + flush_node->start_jiffies = jiffies; + refcount_set(&flush_node->refcnt, 1); + return flush_node; +} + +static void +mlxsw_sp_router_xm_cache_flush_node_hold(struct mlxsw_sp_router_xm_flush_node *flush_node) +{ + if (!flush_node) + return; + refcount_inc(&flush_node->refcnt); +} + +static void +mlxsw_sp_router_xm_cache_flush_node_put(struct mlxsw_sp_router_xm_flush_node *flush_node) +{ + if (!flush_node || !refcount_dec_and_test(&flush_node->refcnt)) + return; + kfree(flush_node); +} + +static void +mlxsw_sp_router_xm_cache_flush_node_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_flush_node *flush_node) +{ + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + + router_xm->flush_count--; + rhashtable_remove_fast(&router_xm->flush_ht, &flush_node->ht_node, + mlxsw_sp_router_xm_flush_ht_params); + mlxsw_sp_router_xm_cache_flush_node_put(flush_node); +} + +static u32 mlxsw_sp_router_xm_flush_mask4(u8 prefix_len) +{ + return GENMASK(31, 32 - prefix_len); +} + +static unsigned char *mlxsw_sp_router_xm_flush_mask6(u8 prefix_len) +{ + static unsigned char mask[sizeof(struct in6_addr)]; + + memset(mask, 0, sizeof(mask)); + memset(mask, 0xff, prefix_len / 8); + mask[prefix_len / 8] = GENMASK(8, 8 - prefix_len % 8); + return mask; +} + +#define MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT 15 +#define MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES 15 +#define MLXSW_SP_ROUTER_XM_CACHE_DELAY 50 /* usecs */ +#define MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT (MLXSW_SP_ROUTER_XM_CACHE_DELAY * 10) + +static void mlxsw_sp_router_xm_cache_flush_work(struct work_struct *work) +{ + struct mlxsw_sp_router_xm_flush_info *flush_info; + struct mlxsw_sp_router_xm_flush_node *flush_node; + char rlcmld_pl[MLXSW_REG_RLCMLD_LEN]; + enum mlxsw_reg_rlcmld_select select; + struct mlxsw_sp *mlxsw_sp; + u32 addr4; + int err; + + flush_node = container_of(work, struct mlxsw_sp_router_xm_flush_node, + dw.work); + mlxsw_sp = flush_node->mlxsw_sp; + flush_info = &flush_node->flush_info; + + if (flush_info->all) { + char rlpmce_pl[MLXSW_REG_RLPMCE_LEN]; + + mlxsw_reg_rlpmce_pack(rlpmce_pl, true, false); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlpmce), + rlpmce_pl); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n"); + + if (flush_node->reuses < + MLXSW_SP_ROUTER_XM_CACHE_FLUSH_ALL_MIN_REUSES) + /* Leaving flush-all mode. 
*/ + mlxsw_sp->router->xm->flush_all_mode = false; + goto out; + } + + select = MLXSW_REG_RLCMLD_SELECT_M_AND_ML_ENTRIES; + + switch (flush_info->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + addr4 = *((u32 *) flush_info->addr); + addr4 &= mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len); + + /* In case the flush prefix length is bigger than M-value, + * it makes no sense to flush M entries. So just flush + * the ML entries. + */ + if (flush_info->prefix_len > MLXSW_SP_ROUTER_XM_M_VAL) + select = MLXSW_REG_RLCMLD_SELECT_ML_ENTRIES; + + mlxsw_reg_rlcmld_pack4(rlcmld_pl, select, + flush_info->virtual_router, addr4, + mlxsw_sp_router_xm_flush_mask4(flush_info->prefix_len)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_rlcmld_pack6(rlcmld_pl, select, + flush_info->virtual_router, flush_info->addr, + mlxsw_sp_router_xm_flush_mask6(flush_info->prefix_len)); + break; + default: + WARN_ON(true); + goto out; + } + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rlcmld), rlcmld_pl); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n"); + +out: + mlxsw_sp_router_xm_cache_flush_node_destroy(mlxsw_sp, flush_node); +} + +static bool +mlxsw_sp_router_xm_cache_flush_may_cancel(struct mlxsw_sp_router_xm_flush_node *flush_node) +{ + unsigned long max_wait = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_MAX_WAIT); + unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY); + + /* In case there is the same flushing work pending, check + * if we can consolidate with it. We can do it up to MAX_WAIT. + * Cancel the delayed work; if it was still pending, it has not + * run yet and the two flushes can be consolidated. + */ + if (time_is_before_jiffies(flush_node->start_jiffies + max_wait - delay) && + cancel_delayed_work_sync(&flush_node->dw)) + return true; + return false; +} + +static int +mlxsw_sp_router_xm_cache_flush_schedule(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_flush_info *flush_info) +{ + unsigned long delay = usecs_to_jiffies(MLXSW_SP_ROUTER_XM_CACHE_DELAY); + struct mlxsw_sp_router_xm_flush_info flush_all_info = {.all = true}; + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + struct mlxsw_sp_router_xm_flush_node *flush_node; + + /* Check if the queued number of flushes reached a critical amount after + * which it is better to just flush the whole cache. + */ + if (router_xm->flush_count == MLXSW_SP_ROUTER_XM_CACHE_PARALLEL_FLUSHES_LIMIT) + /* Entering flush-all mode. */ + router_xm->flush_all_mode = true; + + if (router_xm->flush_all_mode) + flush_info = &flush_all_info; + + rcu_read_lock(); + flush_node = rhashtable_lookup_fast(&router_xm->flush_ht, flush_info, + mlxsw_sp_router_xm_flush_ht_params); + /* Take a reference so the object is not freed before a possible + * delayed work cancel could be done. + */ + mlxsw_sp_router_xm_cache_flush_node_hold(flush_node); + rcu_read_unlock(); + + if (flush_node && mlxsw_sp_router_xm_cache_flush_may_cancel(flush_node)) { + flush_node->reuses++; + mlxsw_sp_router_xm_cache_flush_node_put(flush_node); + /* Original work was within wait period and was canceled. + * That means that the reference is still held and the + * flush_node_put() call above did not free the flush_node. + * Reschedule it with a fresh delay.
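The consolidation above leans on a property of cancel_delayed_work_sync(): a true return means the handler never started, so the pending flush can simply be re-armed and made to cover both requests. A reduced sketch of that cancel-and-reschedule idiom; the MAX_WAIT bound and the refcounting of the real driver are deliberately elided, and flush_node/flush_try_consolidate are invented names:

#include <linux/workqueue.h>

struct flush_node {
	struct delayed_work dw;
	unsigned int reuses;
};

/* Try to fold a new flush request into one already scheduled. */
static bool flush_try_consolidate(struct flush_node *node,
				  unsigned long delay)
{
	if (!cancel_delayed_work_sync(&node->dw))
		return false;	/* too late, the flush already ran */

	/* The pending work never executed; re-arm it so a single
	 * pass covers both the old request and this one.
	 */
	node->reuses++;
	schedule_delayed_work(&node->dw, delay);
	return true;
}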
+ */ + goto schedule_work; + } else { + mlxsw_sp_router_xm_cache_flush_node_put(flush_node); + } + + flush_node = mlxsw_sp_router_xm_cache_flush_node_create(mlxsw_sp, flush_info); + if (IS_ERR(flush_node)) + return PTR_ERR(flush_node); + INIT_DELAYED_WORK(&flush_node->dw, mlxsw_sp_router_xm_cache_flush_work); + +schedule_work: + mlxsw_core_schedule_dw(&flush_node->dw, delay); + return 0; +} + +static int +mlxsw_sp_router_xm_ml_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_fib_entry *fib_entry) +{ + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + struct mlxsw_sp_router_xm_ltable_node *ltable_node; + u8 lvalue = fib_entry->lvalue; + int err; + + ltable_node = mlxsw_sp_router_xm_ltable_node_get(router_xm, + fib_entry->mindex); + if (IS_ERR(ltable_node)) + return PTR_ERR(ltable_node); + if (lvalue > ltable_node->current_lvalue) { + /* The L-value is bigger than the one currently set, update. */ + ltable_node->current_lvalue = lvalue; + err = mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, + ltable_node); + if (err) + goto err_lvalue_set; + + /* The L value for prefix/M is increased. + * Therefore, all entries in M and ML caches matching + * {prefix/M, proto, VR} need to be flushed. Set the flush + * prefix length to M to achieve that. + */ + fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL; + } + + ltable_node->lvalue_ref[lvalue]++; + fib_entry->ltable_node = ltable_node; + + return 0; + +err_lvalue_set: + mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node); + return err; +} + +static void +mlxsw_sp_router_xm_ml_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_router_xm_fib_entry *fib_entry) +{ + struct mlxsw_sp_router_xm_ltable_node *ltable_node = + fib_entry->ltable_node; + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + u8 lvalue = fib_entry->lvalue; + + ltable_node->lvalue_ref[lvalue]--; + if (lvalue == ltable_node->current_lvalue && lvalue && + !ltable_node->lvalue_ref[lvalue]) { + u8 new_lvalue = lvalue - 1; + + /* Find the biggest L-value left out there. */ + while (new_lvalue > 0 && !ltable_node->lvalue_ref[new_lvalue]) + new_lvalue--; + + ltable_node->current_lvalue = new_lvalue; + mlxsw_sp_router_xm_ltable_lvalue_set(mlxsw_sp, ltable_node); + + /* The L value for prefix/M is decreased. + * Therefore, all entries in M and ML caches matching + * {prefix/M, proto, VR} need to be flushed. Set the flush + * prefix length to M to achieve that.
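The lvalue_ref[] histogram in the add/del pair above is what keeps the per-M-index maximum cheap to maintain: an add can only grow the maximum, and a delete that drops the last user of the current maximum scans downward for the next L-value still referenced. Condensed to just that bookkeeping (hardware writes, hashing and error paths dropped; names invented):

#include <linux/types.h>

#define L_MAX	16

struct ltable_node {
	u8 current_lvalue;
	unsigned int lvalue_ref[L_MAX + 1];
};

static void ltable_ref(struct ltable_node *node, u8 lvalue)
{
	if (lvalue > node->current_lvalue)
		node->current_lvalue = lvalue;	/* max can only grow */
	node->lvalue_ref[lvalue]++;
}

static void ltable_unref(struct ltable_node *node, u8 lvalue)
{
	node->lvalue_ref[lvalue]--;
	if (lvalue != node->current_lvalue || !lvalue ||
	    node->lvalue_ref[lvalue])
		return;

	/* The maximum lost its last user: walk down to the biggest
	 * L-value that still has references (or zero).
	 */
	while (node->current_lvalue > 0 &&
	       !node->lvalue_ref[node->current_lvalue])
		node->current_lvalue--;
}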
+ */ + fib_entry->flush_info.prefix_len = MLXSW_SP_ROUTER_XM_M_VAL; + } + mlxsw_sp_router_xm_ltable_node_put(router_xm, ltable_node); +} + +static int +mlxsw_sp_router_xm_ml_entries_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm) +{ + struct mlxsw_sp_router_xm_fib_entry *fib_entry; + int err; + int i; + + for (i = 0; i < op_ctx_xm->entries_count; i++) { + fib_entry = op_ctx_xm->entries[i]; + err = mlxsw_sp_router_xm_ml_entry_add(mlxsw_sp, fib_entry); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + fib_entry = op_ctx_xm->entries[i]; + mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry); + } + return err; +} + +static void +mlxsw_sp_router_xm_ml_entries_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm) +{ + struct mlxsw_sp_router_xm_fib_entry *fib_entry; + int i; + + for (i = 0; i < op_ctx_xm->entries_count; i++) { + fib_entry = op_ctx_xm->entries[i]; + mlxsw_sp_router_xm_ml_entry_del(mlxsw_sp, fib_entry); + } +} + +static void +mlxsw_sp_router_xm_ml_entries_cache_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm) +{ + struct mlxsw_sp_router_xm_fib_entry *fib_entry; + int err; + int i; + + for (i = 0; i < op_ctx_xm->entries_count; i++) { + fib_entry = op_ctx_xm->entries[i]; + err = mlxsw_sp_router_xm_cache_flush_schedule(mlxsw_sp, + &fib_entry->flush_info); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to flush XM cache\n"); + } +} + +static int mlxsw_sp_router_ll_xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry_op_ctx *op_ctx, + bool *postponed_for_bulk) +{ + struct mlxsw_sp_fib_entry_op_ctx_xm *op_ctx_xm = (void *) op_ctx->ll_priv; + struct mlxsw_sp_router_xm_fib_entry *fib_entry; + u8 num_rec; + int err; + int i; + + op_ctx_xm->trans_offset += op_ctx_xm->trans_item_len; + op_ctx_xm->entries_count++; + + /* Check if bulking is possible and there is still room for another + * FIB entry record. The size of 'trans_item_len' is either size of IPv4 + * command or size of IPv6 command. Not possible to mix those in a + * single XMDR write. + */ + if (op_ctx->bulk_ok && + op_ctx_xm->trans_offset + op_ctx_xm->trans_item_len <= MLXSW_REG_XMDR_TRANS_LEN) { + if (postponed_for_bulk) + *postponed_for_bulk = true; + return 0; + } + + if (op_ctx->event == FIB_EVENT_ENTRY_REPLACE) { + /* The L-table is updated inside. It has to be done before + * the prefix is inserted. + */ + err = mlxsw_sp_router_xm_ml_entries_add(mlxsw_sp, op_ctx_xm); + if (err) + goto out; + } + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(xmdr), op_ctx_xm->xmdr_pl); + if (err) + goto out; + num_rec = mlxsw_reg_xmdr_num_rec_get(op_ctx_xm->xmdr_pl); + if (num_rec > op_ctx_xm->entries_count) { + dev_err(mlxsw_sp->bus_info->dev, "Invalid XMDR number of records\n"); + err = -EIO; + goto out; + } + for (i = 0; i < num_rec; i++) { + if (!mlxsw_reg_xmdr_reply_vect_get(op_ctx_xm->xmdr_pl, i)) { + dev_err(mlxsw_sp->bus_info->dev, "Command send over XMDR failed\n"); + err = -EIO; + goto out; + } else { + fib_entry = op_ctx_xm->entries[i]; + fib_entry->committed = true; + } + } + + if (op_ctx->event == FIB_EVENT_ENTRY_DEL) + /* The L-table is updated inside. It has to be done after + * the prefix was removed. + */ + mlxsw_sp_router_xm_ml_entries_del(mlxsw_sp, op_ctx_xm); + + /* At the very end, do the XLT cache flushing to evict stale + * M and ML cache entries after prefixes were inserted/removed. 
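The early-return above is the consumer of the bulk_ok flag set by the event work: each pack call appends one record to the in-flight XMDR transaction, and the commit defers the actual register write whenever the caller promised a compatible next event and another record of the same size still fits. Stripped to its skeleton, with invented names and an arbitrary transaction size standing in for MLXSW_REG_XMDR_TRANS_LEN:

#include <linux/types.h>

#define TRANS_LEN	256	/* placeholder transaction capacity */

struct commit_ctx {
	unsigned int trans_offset;	/* bytes consumed so far */
	unsigned int trans_item_len;	/* size of one packed record */
	unsigned int entries_count;
};

static int commit_or_postpone(struct commit_ctx *ctx, bool bulk_ok,
			      bool *postponed_for_bulk)
{
	ctx->trans_offset += ctx->trans_item_len;
	ctx->entries_count++;

	/* Defer the write while the next event may share this
	 * transaction and one more record still fits.
	 */
	if (bulk_ok &&
	    ctx->trans_offset + ctx->trans_item_len <= TRANS_LEN) {
		*postponed_for_bulk = true;
		return 0;
	}

	/* ...one register write covering entries_count records... */
	ctx->trans_offset = 0;
	ctx->entries_count = 0;
	return 0;
}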
+ */ + mlxsw_sp_router_xm_ml_entries_cache_flush(mlxsw_sp, op_ctx_xm); + +out: + /* Next pack call is going to do reinitialization */ + op_ctx->initialized = false; + return err; +} + +static bool mlxsw_sp_router_ll_xm_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv) +{ + struct mlxsw_sp_router_xm_fib_entry *fib_entry = (void *) priv->priv; + + return fib_entry->committed; +} + +const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops = { + .init = mlxsw_sp_router_ll_xm_init, + .ralta_write = mlxsw_sp_router_ll_xm_ralta_write, + .ralst_write = mlxsw_sp_router_ll_xm_ralst_write, + .raltb_write = mlxsw_sp_router_ll_xm_raltb_write, + .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_xm), + .fib_entry_priv_size = sizeof(struct mlxsw_sp_router_xm_fib_entry), + .fib_entry_pack = mlxsw_sp_router_ll_xm_fib_entry_pack, + .fib_entry_act_remote_pack = mlxsw_sp_router_ll_xm_fib_entry_act_remote_pack, + .fib_entry_act_local_pack = mlxsw_sp_router_ll_xm_fib_entry_act_local_pack, + .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_pack, + .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_xm_fib_entry_act_ip2me_tun_pack, + .fib_entry_commit = mlxsw_sp_router_ll_xm_fib_entry_commit, + .fib_entry_is_committed = mlxsw_sp_router_ll_xm_fib_entry_is_committed, +}; + +#define MLXSW_SP_ROUTER_XM_MINDEX_SIZE (64 * 1024) + +int mlxsw_sp_router_xm_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_router_xm *router_xm; + char rxltm_pl[MLXSW_REG_RXLTM_LEN]; + char xltq_pl[MLXSW_REG_XLTQ_LEN]; + u32 mindex_size; + u16 device_id; + int err; + + if (!mlxsw_sp->bus_info->xm_exists) + return 0; + + router_xm = kzalloc(sizeof(*router_xm), GFP_KERNEL); + if (!router_xm) + return -ENOMEM; + + mlxsw_reg_xltq_pack(xltq_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(xltq), xltq_pl); + if (err) + goto err_xltq_query; + mlxsw_reg_xltq_unpack(xltq_pl, &device_id, &router_xm->ipv4_supported, + &router_xm->ipv6_supported, &router_xm->entries_size, &mindex_size); + + if (device_id != MLXSW_REG_XLTQ_XM_DEVICE_ID_XLT) { + dev_err(mlxsw_sp->bus_info->dev, "Invalid XM device id\n"); + err = -EINVAL; + goto err_device_id_check; + } + + if (mindex_size != MLXSW_SP_ROUTER_XM_MINDEX_SIZE) { + dev_err(mlxsw_sp->bus_info->dev, "Unexpected M-index size\n"); + err = -EINVAL; + goto err_mindex_size_check; + } + + mlxsw_reg_rxltm_pack(rxltm_pl, mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV4], + mlxsw_sp_router_xm_m_val[MLXSW_SP_L3_PROTO_IPV6]); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rxltm), rxltm_pl); + if (err) + goto err_rxltm_write; + + err = rhashtable_init(&router_xm->ltable_ht, &mlxsw_sp_router_xm_ltable_ht_params); + if (err) + goto err_ltable_ht_init; + + err = rhashtable_init(&router_xm->flush_ht, &mlxsw_sp_router_xm_flush_ht_params); + if (err) + goto err_flush_ht_init; + + mlxsw_sp->router->xm = router_xm; + return 0; + +err_flush_ht_init: + rhashtable_destroy(&router_xm->ltable_ht); +err_ltable_ht_init: +err_rxltm_write: +err_mindex_size_check: +err_device_id_check: +err_xltq_query: + kfree(router_xm); + return err; +} + +void mlxsw_sp_router_xm_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + + if (!mlxsw_sp->bus_info->xm_exists) + return; + + rhashtable_destroy(&router_xm->flush_ht); + rhashtable_destroy(&router_xm->ltable_ht); + kfree(router_xm); +} + +bool mlxsw_sp_router_xm_ipv4_is_supported(const struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_router_xm *router_xm = mlxsw_sp->router->xm; + + return 
router_xm && router_xm->ipv4_supported; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 6501ce94ace5..cea42f6ed89b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -41,6 +41,7 @@ struct mlxsw_sp_bridge { DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; + const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops; }; struct mlxsw_sp_bridge_device { @@ -228,8 +229,14 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, bridge_device->mrouter = br_multicast_router(br_dev); INIT_LIST_HEAD(&bridge_device->ports_list); if (vlan_enabled) { + u16 proto; + bridge->vlan_enabled_exists = true; - bridge_device->ops = bridge->bridge_8021q_ops; + br_vlan_get_proto(br_dev, &proto); + if (proto == ETH_P_8021AD) + bridge_device->ops = bridge->bridge_8021ad_ops; + else + bridge_device->ops = bridge->bridge_8021q_ops; } else { bridge_device->ops = bridge->bridge_8021d_ops; } @@ -757,6 +764,25 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } +static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + u16 vlan_proto) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + + if (!switchdev_trans_ph_prepare(trans)) + return 0; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); + if (WARN_ON(!bridge_device)) + return -EINVAL; + + netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n"); + return -EINVAL; +} + static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, struct net_device *orig_dev, @@ -926,6 +952,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.vlan_filtering); break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL: + err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.vlan_protocol); + break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans, attr->orig_dev, @@ -1129,6 +1160,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; u16 old_pvid = mlxsw_sp_port->pvid; + u16 proto; int err; /* The only valid scenario in which a port-vlan already exists, is if @@ -1152,7 +1184,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_vlan_set; - err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + br_vlan_get_proto(bridge_port->bridge_device->dev, &proto); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto); if (err) goto err_port_pvid_set; @@ -1164,7 +1197,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; err_port_vlan_bridge_join: - mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto); err_port_pvid_set: mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); err_port_vlan_set: @@ -1821,13 +1854,15 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, { u16 pvid = mlxsw_sp_port->pvid == vid ? 
0 : mlxsw_sp_port->pvid; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; + u16 proto; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return; mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); + br_vlan_get_proto(bridge_port->bridge_device->dev, &proto); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto); mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); } @@ -1975,10 +2010,9 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, - struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_port *mlxsw_sp_port, - struct netlink_ext_ack *extack) +mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) { if (is_vlan_dev(bridge_port->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); @@ -1992,19 +2026,37 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, return 0; } +static int +mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port, + extack); +} + +static void +mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + /* Make sure untagged frames are allowed to ingress */ + mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, + ETH_P_8021Q); +} + static void mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port) { - /* Make sure untagged frames are allowed to ingress */ - mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); + mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port); } static int -mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, u16 vid, - struct netlink_ext_ack *extack) +mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, + u16 vid, u16 ethertype, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); @@ -2012,6 +2064,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ethertype, }; struct mlxsw_sp_fid *fid; int err; @@ -2050,6 +2103,15 @@ err_vni_exists: return err; } +static int +mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021Q, extack); +} + static struct net_device * mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid) { @@ -2180,6 +2242,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, .type = MLXSW_SP_NVE_TYPE_VXLAN, .vni = vxlan->cfg.vni, .dev = vxlan_dev, + .ethertype = ETH_P_8021Q, }; struct mlxsw_sp_fid *fid; int err; @@ -2246,6 +2309,57 @@ static const struct 
mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { .fid_vid = mlxsw_sp_bridge_8021d_fid_vid, }; +static int +mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false); + if (err) + return err; + + err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port, + extack); + if (err) + goto err_bridge_vlan_aware_port_join; + + return 0; + +err_bridge_vlan_aware_port_join: + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); + return err; +} + +static void +mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port); + mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true); +} + +static int +mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev, + vid, ETH_P_8021AD, extack); +} + +static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = { + .port_join = mlxsw_sp_bridge_8021ad_port_join, + .port_leave = mlxsw_sp_bridge_8021ad_port_leave, + .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join, + .fid_get = mlxsw_sp_bridge_8021q_fid_get, + .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, + .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, +}; + int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev, @@ -3206,8 +3320,8 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!fid) { if (!flag_untagged || !flag_pvid) return 0; - return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, - vxlan_dev, vid, extack); + return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, + vid, extack); } /* Second case: FID is associated with the VNI and the VLAN associated @@ -3246,16 +3360,14 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, if (!flag_untagged) return 0; - err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, - extack); + err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack); if (err) goto err_vxlan_join; return 0; err_vxlan_join: - mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid, - NULL); + bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL); return err; } @@ -3507,6 +3619,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; + bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops; return mlxsw_sp_fdb_init(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 433f14ade464..4ef12e3e021a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -617,7 +617,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { TRAP_TO_CPU), MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS, TRAP_TO_CPU), - MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_EXCEPTIONS, + MLXSW_SP_RXL_EXCEPTION(RTR_EGRESS0, L3_EXCEPTIONS, TRAP_EXCEPTION_TO_CPU), }, }, @@ -1007,6 +1007,12 @@ 
static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { false), }, }, + { + .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_NEXTHOP, L3_DROPS), + .listeners_arr = { + MLXSW_SP_RXL_DISCARD(ROUTER3, L3_DISCARDS), + }, + }, }; static struct mlxsw_sp_trap_policer_item * diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 5023d91269f4..40e2e79d4517 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -6,6 +6,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/netdevice.h> +#include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/device.h> diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 57f9e24602d0..9e070ab3ed76 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -52,6 +52,7 @@ enum { MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71, MLXSW_TRAP_ID_IPV6_PIM = 0x79, MLXSW_TRAP_ID_IPV6_VRRP = 0x7A, + MLXSW_TRAP_ID_RTR_EGRESS0 = 0x80, MLXSW_TRAP_ID_IPV4_BGP = 0x88, MLXSW_TRAP_ID_IPV6_BGP = 0x89, MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index d65872172229..6fc7483aea03 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -1112,7 +1112,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, /* setup mii state */ ks->mii.dev = netdev; - ks->mii.phy_id = 1, + ks->mii.phy_id = 1; ks->mii.phy_id_mask = 1; ks->mii.reg_num_mask = 0xf; ks->mii.mdio_read = ks8851_phy_read; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index dcde496da7fb..c5de8f46cdd3 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev, wol->supported = 0; wol->wolopts = 0; - phy_ethtool_get_wol(netdev->phydev, wol); + + if (netdev->phydev) + phy_ethtool_get_wol(netdev->phydev, wol); wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | WAKE_MAGIC | WAKE_PHY | WAKE_ARP; @@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev, device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts); - phy_ethtool_set_wol(netdev->phydev, wol); - - return 0; + return netdev->phydev ? 
phy_ethtool_set_wol(netdev->phydev, wol) + : -ENETDOWN; } #endif /* CONFIG_PM */ diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index b319c22c211c..dbd6c3946aa6 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -140,18 +140,14 @@ clean_up: return result; } -static void lan743x_intr_software_isr(void *context) +static void lan743x_intr_software_isr(struct lan743x_adapter *adapter) { - struct lan743x_adapter *adapter = context; struct lan743x_intr *intr = &adapter->intr; - u32 int_sts; - int_sts = lan743x_csr_read(adapter, INT_STS); - if (int_sts & INT_BIT_SW_GP_) { - /* disable the interrupt to prevent repeated re-triggering */ - lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); - intr->software_isr_flag = 1; - } + /* disable the interrupt to prevent repeated re-triggering */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); + intr->software_isr_flag = true; + wake_up(&intr->software_isr_wq); } static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags) @@ -348,27 +344,22 @@ irq_done: static int lan743x_intr_test_isr(struct lan743x_adapter *adapter) { struct lan743x_intr *intr = &adapter->intr; - int result = -ENODEV; - int timeout = 10; + int ret; - intr->software_isr_flag = 0; + intr->software_isr_flag = false; - /* enable interrupt */ + /* enable and activate test interrupt */ lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_); - - /* activate interrupt here */ lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_); - while ((timeout > 0) && (!(intr->software_isr_flag))) { - usleep_range(1000, 20000); - timeout--; - } - if (intr->software_isr_flag) - result = 0; + ret = wait_event_timeout(intr->software_isr_wq, + intr->software_isr_flag, + msecs_to_jiffies(200)); - /* disable interrupts */ + /* disable test interrupt */ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); - return result; + + return ret > 0 ? 
0 : -ENODEV; } static int lan743x_intr_register_isr(struct lan743x_adapter *adapter, @@ -542,6 +533,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter) flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C; } + init_waitqueue_head(&intr->software_isr_wq); + ret = lan743x_intr_register_isr(adapter, 0, flags, INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ | INT_BIT_ALL_OTHER_, @@ -830,14 +823,13 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) static int lan743x_mac_open(struct lan743x_adapter *adapter) { - int ret = 0; u32 temp; temp = lan743x_csr_read(adapter, MAC_RX); lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); temp = lan743x_csr_read(adapter, MAC_TX); lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); - return ret; + return 0; } static void lan743x_mac_close(struct lan743x_adapter *adapter) @@ -958,7 +950,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev) data = lan743x_csr_read(adapter, MAC_CR); /* set interface mode */ - if (phy_interface_mode_is_rgmii(adapter->phy_mode)) + if (phy_interface_is_rgmii(phydev)) /* RGMII */ data &= ~MAC_CR_MII_EN_; else @@ -1014,33 +1006,14 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter) static int lan743x_phy_open(struct lan743x_adapter *adapter) { + struct net_device *netdev = adapter->netdev; struct lan743x_phy *phy = &adapter->phy; - struct phy_device *phydev = NULL; - struct device_node *phynode; - struct net_device *netdev; + struct phy_device *phydev; int ret = -EIO; - netdev = adapter->netdev; - phynode = of_node_get(adapter->pdev->dev.of_node); - - if (phynode) { - /* try devicetree phy, or fixed link */ - of_get_phy_mode(phynode, &adapter->phy_mode); - - if (of_phy_is_fixed_link(phynode)) { - ret = of_phy_register_fixed_link(phynode); - if (ret) { - netdev_err(netdev, - "cannot register fixed PHY\n"); - of_node_put(phynode); - goto return_error; - } - } - phydev = of_phy_connect(netdev, phynode, - lan743x_phy_link_status_change, 0, - adapter->phy_mode); - of_node_put(phynode); - } + /* try devicetree phy, or fixed link */ + phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node, + lan743x_phy_link_status_change); if (!phydev) { /* try internal phy */ @@ -1048,10 +1021,9 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) if (!phydev) goto return_error; - adapter->phy_mode = PHY_INTERFACE_MODE_GMII; ret = phy_connect_direct(netdev, phydev, lan743x_phy_link_status_change, - adapter->phy_mode); + PHY_INTERFACE_MODE_GMII); if (ret) goto return_error; } @@ -1066,6 +1038,7 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) phy_start(phydev); phy_start_aneg(phydev); + phy_attached_info(phydev); return 0; return_error: diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index a536f4a4994d..404af3f4635e 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -616,7 +616,8 @@ struct lan743x_intr { int number_of_vectors; bool using_vectors; - int software_isr_flag; + bool software_isr_flag; + wait_queue_head_t software_isr_wq; }; #define LAN743X_MAX_FRAME_SIZE (9 * 1024) @@ -703,7 +704,6 @@ struct lan743x_rx { struct lan743x_adapter { struct net_device *netdev; struct mii_bus *mdiobus; - phy_interface_t phy_mode; int msg_enable; #ifdef CONFIG_PM u32 wolopts; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index a53bd36b11c6..0b9992bd6626 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ 
b/drivers/net/ethernet/mscc/ocelot.c @@ -147,42 +147,20 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) return ocelot_vlant_wait_for_completion(ocelot); } -static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, - u16 vid) +static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, + struct ocelot_vlan native_vlan) { struct ocelot_port *ocelot_port = ocelot->ports[port]; u32 val = 0; - if (ocelot_port->vid != vid) { - /* Always permit deleting the native VLAN (vid = 0) */ - if (ocelot_port->vid && vid) { - dev_err(ocelot->dev, - "Port already has a native VLAN: %d\n", - ocelot_port->vid); - return -EBUSY; - } - ocelot_port->vid = vid; - } + ocelot_port->native_vlan = native_vlan; - ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid), + ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid), REW_PORT_VLAN_CFG_PORT_VID_M, REW_PORT_VLAN_CFG, port); - if (ocelot_port->vlan_aware && !ocelot_port->vid) - /* If port is vlan-aware and tagged, drop untagged and priority - * tagged frames. - */ - val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; - ocelot_rmw_gix(ocelot, val, - ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, - ANA_PORT_DROP_CFG, port); - if (ocelot_port->vlan_aware) { - if (ocelot_port->vid) + if (native_vlan.valid) /* Tag all frames except when VID == DEFAULT_VLAN */ val = REW_TAG_CFG_TAG_CFG(1); else @@ -195,8 +173,38 @@ static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, ocelot_rmw_gix(ocelot, val, REW_TAG_CFG_TAG_CFG_M, REW_TAG_CFG, port); +} - return 0; +/* Default vlan to clasify for untagged frames (may be zero) */ +static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, + struct ocelot_vlan pvid_vlan) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 val = 0; + + ocelot_port->pvid_vlan = pvid_vlan; + + if (!ocelot_port->vlan_aware) + pvid_vlan.vid = 0; + + ocelot_rmw_gix(ocelot, + ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid), + ANA_PORT_VLAN_CFG_VLAN_VID_M, + ANA_PORT_VLAN_CFG, port); + + /* If there's no pvid, we should drop not only untagged traffic (which + * happens automatically), but also 802.1p traffic which gets + * classified to VLAN 0, but that is always in our RX filter, so it + * would get accepted were it not for this setting. 
+ */ + if (!pvid_vlan.valid && ocelot_port->vlan_aware) + val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + + ocelot_rmw_gix(ocelot, val, + ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, + ANA_PORT_DROP_CFG, port); } int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, @@ -233,24 +241,30 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, ANA_PORT_VLAN_CFG, port); - ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid); + ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); + ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan); return 0; } EXPORT_SYMBOL(ocelot_port_vlan_filtering); -/* Default vlan to clasify for untagged frames (may be zero) */ -static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid) +int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, + bool untagged) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - ocelot_rmw_gix(ocelot, - ANA_PORT_VLAN_CFG_VLAN_VID(pvid), - ANA_PORT_VLAN_CFG_VLAN_VID_M, - ANA_PORT_VLAN_CFG, port); + /* Deny changing the native VLAN, but always permit deleting it */ + if (untagged && ocelot_port->native_vlan.vid != vid && + ocelot_port->native_vlan.valid) { + dev_err(ocelot->dev, + "Port already has a native VLAN: %d\n", + ocelot_port->native_vlan.vid); + return -EBUSY; + } - ocelot_port->pvid = pvid; + return 0; } +EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) @@ -264,14 +278,21 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, return ret; /* Default ingress vlan classification */ - if (pvid) - ocelot_port_set_pvid(ocelot, port, vid); + if (pvid) { + struct ocelot_vlan pvid_vlan; + + pvid_vlan.vid = vid; + pvid_vlan.valid = true; + ocelot_port_set_pvid(ocelot, port, pvid_vlan); + } /* Untagged egress vlan clasification */ if (untagged) { - ret = ocelot_port_set_native_vlan(ocelot, port, vid); - if (ret) - return ret; + struct ocelot_vlan native_vlan; + + native_vlan.vid = vid; + native_vlan.valid = true; + ocelot_port_set_native_vlan(ocelot, port, native_vlan); } return 0; @@ -290,12 +311,18 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) return ret; /* Ingress */ - if (ocelot_port->pvid == vid) - ocelot_port_set_pvid(ocelot, port, 0); + if (ocelot_port->pvid_vlan.vid == vid) { + struct ocelot_vlan pvid_vlan = {0}; + + ocelot_port_set_pvid(ocelot, port, pvid_vlan); + } /* Egress */ - if (ocelot_port->vid == vid) - ocelot_port_set_native_vlan(ocelot, port, 0); + if (ocelot_port->native_vlan.vid == vid) { + struct ocelot_vlan native_vlan = {0}; + + ocelot_port_set_native_vlan(ocelot, port, native_vlan); + } return 0; } @@ -542,26 +569,11 @@ EXPORT_SYMBOL(ocelot_get_txtstamp); int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; int pgid = port; if (port == ocelot->npi) pgid = PGID_CPU; - if (!vid) { - if (!ocelot_port->vlan_aware) - /* If the bridge is not VLAN aware and no VID was - * provided, set it to pvid to ensure the MAC entry - * matches incoming untagged packets - */ - vid = ocelot_port->pvid; - else - /* If the bridge is VLAN aware a VID must be provided as - * otherwise the learnt entry wouldn't match any frame. 
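
Both ocelot helpers above now take a struct ocelot_vlan rather than a bare u16, because VID 0 is a legal classification target and can no longer double as the "no VLAN" sentinel. The type this series adds (in include/soc/mscc/ocelot.h, outside the hunks shown) reduces to something along these lines:

#include <linux/types.h>

struct ocelot_vlan {
        bool valid;     /* false: no pvid / native VLAN configured */
        u16 vid;
};

With an explicit valid flag, "delete" becomes assigning a zeroed structure, as ocelot_vlan_del() and ocelot_port_bridge_leave() do further below.
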
- */ - return -EINVAL; - } - return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); } EXPORT_SYMBOL(ocelot_fdb_add); @@ -958,52 +970,88 @@ static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr) return ENTRYTYPE_MACv4; if (addr[0] == 0x33 && addr[1] == 0x33) return ENTRYTYPE_MACv6; - return ENTRYTYPE_NORMAL; + return ENTRYTYPE_LOCKED; +} + +static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index, + unsigned long ports) +{ + struct ocelot_pgid *pgid; + + pgid = kzalloc(sizeof(*pgid), GFP_KERNEL); + if (!pgid) + return ERR_PTR(-ENOMEM); + + pgid->ports = ports; + pgid->index = index; + refcount_set(&pgid->refcount, 1); + list_add_tail(&pgid->list, &ocelot->pgids); + + return pgid; } -static int ocelot_mdb_get_pgid(struct ocelot *ocelot, - enum macaccess_entry_type entry_type) +static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid) { - int pgid; + if (!refcount_dec_and_test(&pgid->refcount)) + return; + + list_del(&pgid->list); + kfree(pgid); +} + +static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot, + const struct ocelot_multicast *mc) +{ + struct ocelot_pgid *pgid; + int index; /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the * destination mask table (PGID), the destination set is programmed as * part of the entry MAC address.", and the DEST_IDX is set to 0. */ - if (entry_type == ENTRYTYPE_MACv4 || - entry_type == ENTRYTYPE_MACv6) - return 0; + if (mc->entry_type == ENTRYTYPE_MACv4 || + mc->entry_type == ENTRYTYPE_MACv6) + return ocelot_pgid_alloc(ocelot, 0, mc->ports); - for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) { - struct ocelot_multicast *mc; + list_for_each_entry(pgid, &ocelot->pgids, list) { + /* When searching for a nonreserved multicast PGID, ignore the + * dummy PGID of zero that we have for MACv4/MACv6 entries + */ + if (pgid->index && pgid->ports == mc->ports) { + refcount_inc(&pgid->refcount); + return pgid; + } + } + + /* Search for a free index in the nonreserved multicast PGID area */ + for_each_nonreserved_multicast_dest_pgid(ocelot, index) { bool used = false; - list_for_each_entry(mc, &ocelot->multicast, list) { - if (mc->pgid == pgid) { + list_for_each_entry(pgid, &ocelot->pgids, list) { + if (pgid->index == index) { used = true; break; } } if (!used) - return pgid; + return ocelot_pgid_alloc(ocelot, index, mc->ports); } - return -1; + return ERR_PTR(-ENOSPC); } static void ocelot_encode_ports_to_mdb(unsigned char *addr, - struct ocelot_multicast *mc, - enum macaccess_entry_type entry_type) + struct ocelot_multicast *mc) { - memcpy(addr, mc->addr, ETH_ALEN); + ether_addr_copy(addr, mc->addr); - if (entry_type == ENTRYTYPE_MACv4) { + if (mc->entry_type == ENTRYTYPE_MACv4) { addr[0] = 0; addr[1] = mc->ports >> 8; addr[2] = mc->ports & 0xff; - } else if (entry_type == ENTRYTYPE_MACv6) { + } else if (mc->entry_type == ENTRYTYPE_MACv6) { addr[0] = mc->ports >> 8; addr[1] = mc->ports & 0xff; } @@ -1012,80 +1060,78 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr, int ocelot_port_mdb_add(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - enum macaccess_entry_type entry_type; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; + struct ocelot_pgid *pgid; u16 vid = mdb->vid; - bool new = false; if (port == ocelot->npi) port = ocelot->num_phys_ports; - if (!vid) - vid = 
ocelot_port->pvid; - - entry_type = ocelot_classify_mdb(mdb->addr); - mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { - int pgid = ocelot_mdb_get_pgid(ocelot, entry_type); - - if (pgid < 0) { - dev_err(ocelot->dev, - "No more PGIDs available for mdb %pM vid %d\n", - mdb->addr, vid); - return -ENOSPC; - } - + /* New entry */ mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; - memcpy(mc->addr, mdb->addr, ETH_ALEN); + mc->entry_type = ocelot_classify_mdb(mdb->addr); + ether_addr_copy(mc->addr, mdb->addr); mc->vid = vid; - mc->pgid = pgid; list_add_tail(&mc->list, &ocelot->multicast); - new = true; - } - - if (!new) { - ocelot_encode_ports_to_mdb(addr, mc, entry_type); + } else { + /* Existing entry. Clean up the current port mask from + * hardware now, because we'll be modifying it. + */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); ocelot_mact_forget(ocelot, addr, vid); } mc->ports |= BIT(port); - ocelot_encode_ports_to_mdb(addr, mc, entry_type); - return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type); + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); } EXPORT_SYMBOL(ocelot_port_mdb_add); int ocelot_port_mdb_del(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - enum macaccess_entry_type entry_type; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; + struct ocelot_pgid *pgid; u16 vid = mdb->vid; if (port == ocelot->npi) port = ocelot->num_phys_ports; - if (!vid) - vid = ocelot_port->pvid; - mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) return -ENOENT; - entry_type = ocelot_classify_mdb(mdb->addr); - - ocelot_encode_ports_to_mdb(addr, mc, entry_type); + ocelot_encode_ports_to_mdb(addr, mc); ocelot_mact_forget(ocelot, addr, vid); + ocelot_pgid_free(ocelot, mc->pgid); mc->ports &= ~BIT(port); if (!mc->ports) { list_del(&mc->list); @@ -1093,9 +1139,21 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, return 0; } - ocelot_encode_ports_to_mdb(addr, mc, entry_type); + /* We have a PGID with fewer ports now */ + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) + return PTR_ERR(pgid); + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); - return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type); + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); } EXPORT_SYMBOL(ocelot_port_mdb_del); @@ -1120,6 +1178,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_join); int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, struct net_device *bridge) { + struct ocelot_vlan pvid = {0}, native_vlan = {0}; struct switchdev_trans trans; int ret; @@ -1138,8 +1197,10 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, if (ret) return ret; - ocelot_port_set_pvid(ocelot, port, 0); - return ocelot_port_set_native_vlan(ocelot, port, 0); + 
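
ocelot_pgid_alloc/free and ocelot_mdb_get_pgid above implement a refcounted cache of a scarce hardware resource: port masks are deduplicated so that multicast groups with identical destination sets share one of the limited PGID indices. Stripped of the hardware specifics, the idiom is:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct pgid {
        unsigned long ports;    /* lookup key: egress port mask */
        int index;              /* the scarce hardware index */
        refcount_t refcount;
        struct list_head list;
};

static struct pgid *pgid_get(struct list_head *pgids, unsigned long ports)
{
        struct pgid *p;

        /* share an existing entry with the same key... */
        list_for_each_entry(p, pgids, list) {
                if (p->ports == ports) {
                        refcount_inc(&p->refcount);
                        return p;
                }
        }

        /* ...or allocate a new one (index selection omitted) */
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);
        p->ports = ports;
        refcount_set(&p->refcount, 1);
        list_add_tail(&p->list, pgids);
        return p;
}

static void pgid_put(struct pgid *p)
{
        if (!refcount_dec_and_test(&p->refcount))
                return;
        list_del(&p->list);
        kfree(p);
}

The driver's version additionally special-cases MACv4/MACv6 entries, whose destination set is encoded in the MAC address itself rather than in a PGID.
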
ocelot_port_set_pvid(ocelot, port, pvid); + ocelot_port_set_native_vlan(ocelot, port, native_vlan); + + return 0; } EXPORT_SYMBOL(ocelot_port_bridge_leave); @@ -1452,7 +1513,14 @@ int ocelot_init(struct ocelot *ocelot) if (!ocelot->stats_queue) return -ENOMEM; + ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0); + if (!ocelot->owq) { + destroy_workqueue(ocelot->stats_queue); + return -ENOMEM; + } + INIT_LIST_HEAD(&ocelot->multicast); + INIT_LIST_HEAD(&ocelot->pgids); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); ocelot_vcap_init(ocelot); @@ -1557,6 +1625,7 @@ void ocelot_deinit(struct ocelot *ocelot) { cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); + destroy_workqueue(ocelot->owq); mutex_destroy(&ocelot->stats_lock); } EXPORT_SYMBOL(ocelot_deinit); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index abb407dff93c..291d39d49c4e 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -41,14 +41,6 @@ struct frame_info { u32 timestamp; /* rew_val */ }; -struct ocelot_multicast { - struct list_head list; - unsigned char addr[ETH_ALEN]; - u16 vid; - u16 ports; - int pgid; -}; - struct ocelot_port_tc { bool block_shared; unsigned long offload_cnt; @@ -87,6 +79,29 @@ enum macaccess_entry_type { ENTRYTYPE_MACv6, }; +/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports + * possibilities of egress port masks for L2 multicast traffic. + * For a switch with 9 user ports, there are 512 possible port masks, but the + * hardware only has 46 individual PGIDs that it can forward multicast traffic + * to. So we need a structure that maps the limited PGID indices to the port + * destinations requested by the user for L2 multicast. + */ +struct ocelot_pgid { + unsigned long ports; + int index; + refcount_t refcount; + struct list_head list; +}; + +struct ocelot_multicast { + struct list_head list; + enum macaccess_entry_type entry_type; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 ports; + struct ocelot_pgid *pgid; +}; + int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, bool is_static, void *data); int ocelot_mact_learn(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index b34da11acf65..2bd2840d88bd 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -206,6 +206,17 @@ static void ocelot_port_adjust_link(struct net_device *dev) ocelot_adjust_link(ocelot, port, dev->phydev); } +static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged); +} + static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, bool untagged) { @@ -403,13 +414,81 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +enum ocelot_action_type { + OCELOT_MACT_LEARN, + OCELOT_MACT_FORGET, +}; + +struct ocelot_mact_work_ctx { + struct work_struct work; + struct ocelot *ocelot; + enum ocelot_action_type type; + union { + /* OCELOT_MACT_LEARN */ + struct { + unsigned char addr[ETH_ALEN]; + u16 vid; + enum macaccess_entry_type entry_type; + int pgid; + } learn; + /* OCELOT_MACT_FORGET */ + struct { + unsigned char addr[ETH_ALEN]; 
+ u16 vid; + } forget; + }; +}; + +#define ocelot_work_to_ctx(x) \ + container_of((x), struct ocelot_mact_work_ctx, work) + +static void ocelot_mact_work(struct work_struct *work) +{ + struct ocelot_mact_work_ctx *w = ocelot_work_to_ctx(work); + struct ocelot *ocelot = w->ocelot; + + switch (w->type) { + case OCELOT_MACT_LEARN: + ocelot_mact_learn(ocelot, w->learn.pgid, w->learn.addr, + w->learn.vid, w->learn.entry_type); + break; + case OCELOT_MACT_FORGET: + ocelot_mact_forget(ocelot, w->forget.addr, w->forget.vid); + break; + default: + break; + }; + + kfree(w); +} + +static int ocelot_enqueue_mact_action(struct ocelot *ocelot, + const struct ocelot_mact_work_ctx *ctx) +{ + struct ocelot_mact_work_ctx *w = kmemdup(ctx, sizeof(*w), GFP_ATOMIC); + + if (!w) + return -ENOMEM; + + w->ocelot = ocelot; + INIT_WORK(&w->work, ocelot_mact_work); + queue_work(ocelot->owq, &w->work); + + return 0; +} + static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_mact_work_ctx w; - return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid); + ether_addr_copy(w.forget.addr, addr); + w.forget.vid = ocelot_port->pvid_vlan.vid; + w.type = OCELOT_MACT_FORGET; + + return ocelot_enqueue_mact_action(ocelot, &w); } static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) @@ -417,9 +496,15 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_mact_work_ctx w; + + ether_addr_copy(w.learn.addr, addr); + w.learn.vid = ocelot_port->pvid_vlan.vid; + w.learn.pgid = PGID_CPU; + w.learn.entry_type = ENTRYTYPE_LOCKED; + w.type = OCELOT_MACT_LEARN; - return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid, - ENTRYTYPE_LOCKED); + return ocelot_enqueue_mact_action(ocelot, &w); } static void ocelot_set_rx_mode(struct net_device *dev) @@ -462,10 +547,10 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p) const struct sockaddr *addr = p; /* Learn the new net device MAC address in the mac table. */ - ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid, - ENTRYTYPE_LOCKED); + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, + ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED); /* Then forget the previous one. 
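
.ndo_set_rx_mode and the address sync callbacks run in atomic context, while ocelot's MAC table accesses can sleep; the work-context structure above packages each learn/forget request so it can be replayed from the ordered workqueue ("ocelot-owq") allocated in ocelot_init(). The enqueue half of the pattern, reduced to essentials:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct mact_work_ctx {
        struct work_struct work;
        unsigned char addr[6];
        u16 vid;
};

static void mact_work_fn(struct work_struct *work)
{
        struct mact_work_ctx *w =
                container_of(work, struct mact_work_ctx, work);

        /* sleepable hardware access happens here */
        kfree(w);       /* each work item owns its context */
}

/* atomic-context caller: copy the request, defer the sleeping part */
static int enqueue_mact_action(struct workqueue_struct *owq,
                               const struct mact_work_ctx *tmpl)
{
        struct mact_work_ctx *w = kmemdup(tmpl, sizeof(*w), GFP_ATOMIC);

        if (!w)
                return -ENOMEM;

        INIT_WORK(&w->work, mact_work_fn);
        queue_work(owq, &w->work);
        return 0;
}

An ordered workqueue executes items one at a time in queueing order, so a deferred learn for an address cannot be reordered after a later forget of the same address.
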
*/ - ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid); + ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid); ether_addr_copy(dev->dev_addr, addr->sa_data); return 0; @@ -812,9 +897,14 @@ static int ocelot_port_obj_add_vlan(struct net_device *dev, u16 vid; for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - ret = ocelot_vlan_vid_add(dev, vid, - vlan->flags & BRIDGE_VLAN_INFO_PVID, - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + + if (switchdev_trans_ph_prepare(trans)) + ret = ocelot_vlan_vid_prepare(dev, vid, pvid, + untagged); + else + ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged); if (ret) return ret; } @@ -1074,8 +1164,8 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); dev->dev_addr[ETH_ALEN - 1] += port; - ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, - ENTRYTYPE_LOCKED); + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, + ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED); ocelot_init_port(ocelot, port); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index d13d92bf7447..8f2f091bce89 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -1106,7 +1106,7 @@ static int s2io_print_pci_mode(struct s2io_nic *nic) * '-1' on failure */ -static int init_tti(struct s2io_nic *nic, int link) +static int init_tti(struct s2io_nic *nic, int link, bool may_sleep) { struct XENA_dev_config __iomem *bar0 = nic->bar0; register u64 val64 = 0; @@ -1166,7 +1166,7 @@ static int init_tti(struct s2io_nic *nic, int link) if (wait_for_cmd_complete(&bar0->tti_command_mem, TTI_CMD_MEM_STROBE_NEW_CMD, - S2IO_BIT_RESET) != SUCCESS) + S2IO_BIT_RESET, may_sleep) != SUCCESS) return FAILURE; } @@ -1659,7 +1659,7 @@ static int init_nic(struct s2io_nic *nic) */ /* Initialize TTI */ - if (SUCCESS != init_tti(nic, nic->last_link_state)) + if (SUCCESS != init_tti(nic, nic->last_link_state, true)) return -ENODEV; /* RTI Initialization */ @@ -3331,7 +3331,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev) */ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, - int bit_state) + int bit_state, bool may_sleep) { int ret = FAILURE, cnt = 0, delay = 1; u64 val64; @@ -3353,7 +3353,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, } } - if (in_interrupt()) + if (!may_sleep) mdelay(delay); else msleep(delay); @@ -4877,8 +4877,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) * Return value: * void. 
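
ocelot_port_obj_add_vlan() above now distinguishes the two switchdev transaction phases: validation (where ocelot_vlan_prepare() can return -EBUSY for a conflicting native VLAN) happens in prepare, and hardware writes happen only in commit, which is not allowed to fail. Schematically, with hypothetical helper names:

#include <net/switchdev.h>

int my_vlan_prepare(u16 vid, bool pvid, bool untagged);
int my_vlan_commit(u16 vid, bool pvid, bool untagged);

static int port_obj_add_vlan(struct switchdev_trans *trans,
                             u16 vid, bool pvid, bool untagged)
{
        if (switchdev_trans_ph_prepare(trans))
                /* phase 1: only check that the request can succeed */
                return my_vlan_prepare(vid, pvid, untagged);

        /* phase 2: apply; failures here cannot be unwound cleanly */
        return my_vlan_commit(vid, pvid, untagged);
}
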
*/ - -static void s2io_set_multicast(struct net_device *dev) +static void s2io_set_multicast(struct net_device *dev, bool may_sleep) { int i, j, prev_cnt; struct netdev_hw_addr *ha; @@ -4903,7 +4902,7 @@ static void s2io_set_multicast(struct net_device *dev) /* Wait till command completes */ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET); + S2IO_BIT_RESET, may_sleep); sp->m_cast_flg = 1; sp->all_multi_pos = config->max_mc_addr - 1; @@ -4920,7 +4919,7 @@ static void s2io_set_multicast(struct net_device *dev) /* Wait till command completes */ wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET); + S2IO_BIT_RESET, may_sleep); sp->m_cast_flg = 0; sp->all_multi_pos = 0; @@ -5000,7 +4999,7 @@ static void s2io_set_multicast(struct net_device *dev) /* Wait for command completes */ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET)) { + S2IO_BIT_RESET, may_sleep)) { DBG_PRINT(ERR_DBG, "%s: Adding Multicasts failed\n", dev->name); @@ -5030,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev) /* Wait for command completes */ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET)) { + S2IO_BIT_RESET, may_sleep)) { DBG_PRINT(ERR_DBG, "%s: Adding Multicasts failed\n", dev->name); @@ -5041,6 +5040,12 @@ static void s2io_set_multicast(struct net_device *dev) } } +/* NDO wrapper for s2io_set_multicast */ +static void s2io_ndo_set_multicast(struct net_device *dev) +{ + s2io_set_multicast(dev, false); +} + /* read from CAM unicast & multicast addresses and store it in * def_mac_addr structure */ @@ -5127,7 +5132,7 @@ static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off) /* Wait till command completes */ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET)) { + S2IO_BIT_RESET, true)) { DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n"); return FAILURE; } @@ -5171,7 +5176,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset) /* Wait till command completes */ if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET)) { + S2IO_BIT_RESET, true)) { DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n"); return FAILURE; } @@ -7141,7 +7146,7 @@ static int s2io_card_up(struct s2io_nic *sp) } /* Setting its receive mode */ - s2io_set_multicast(dev); + s2io_set_multicast(dev, true); if (dev->features & NETIF_F_LRO) { /* Initialize max aggregatable pkts per session based on MTU */ @@ -7447,7 +7452,7 @@ static void s2io_link(struct s2io_nic *sp, int link) struct swStat *swstats = &sp->mac_control.stats_info->sw_stat; if (link != sp->last_link_state) { - init_tti(sp, link); + init_tti(sp, link, false); if (link == LINK_DOWN) { DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); s2io_stop_all_tx_queue(sp); @@ -7604,7 +7609,7 @@ static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring) return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl, RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, - S2IO_BIT_RESET); + S2IO_BIT_RESET, true); } static const struct net_device_ops s2io_netdev_ops = { @@ -7613,7 +7618,7 @@ static const struct net_device_ops s2io_netdev_ops = { .ndo_get_stats = s2io_get_stats, .ndo_start_xmit = s2io_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_set_rx_mode = s2io_set_multicast, + .ndo_set_rx_mode = s2io_ndo_set_multicast, 
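
The s2io changes here are part of the kernel-wide in_interrupt() removal: rather than guessing the execution context at runtime, wait_for_cmd_complete() is told explicitly whether it may sleep, the flag is threaded through from each caller, and the one atomic-context entry point (.ndo_set_rx_mode, called with the address-list lock held) gets a thin non-sleeping wrapper. The shape of the conversion, simplified:

#include <linux/delay.h>

static void hw_wait_one_ms(bool may_sleep)
{
        if (may_sleep)
                msleep(1);      /* process context: allow scheduling */
        else
                mdelay(1);      /* atomic context: busy-wait */
}

static void set_multicast(bool may_sleep)
{
        /* ...program filters, polling via hw_wait_one_ms(may_sleep)... */
        hw_wait_one_ms(may_sleep);
}

/* .ndo_set_rx_mode runs with addr_list_lock held: must not sleep */
static void ndo_set_multicast(void)
{
        set_multicast(false);
}
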
.ndo_do_ioctl = s2io_ioctl, .ndo_set_mac_address = s2io_set_mac_addr, .ndo_change_mtu = s2io_change_mtu, @@ -7929,7 +7934,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) writeq(val64, &bar0->rmac_addr_cmd_mem); wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem, RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, - S2IO_BIT_RESET); + S2IO_BIT_RESET, true); tmp64 = readq(&bar0->rmac_addr_data0_mem); mac_down = (u32)tmp64; mac_up = (u32) (tmp64 >> 32); diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 6fa3159a977f..5a6032212c19 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1066,7 +1066,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data); static void s2io_handle_errors(void * dev_id); static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue); -static void s2io_set_multicast(struct net_device *dev); +static void s2io_set_multicast(struct net_device *dev, bool may_sleep); static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); static void s2io_link(struct s2io_nic * sp, int link); static void s2io_reset(struct s2io_nic * sp); @@ -1087,7 +1087,7 @@ static int s2io_set_swapper(struct s2io_nic * sp); static void s2io_card_down(struct s2io_nic *nic); static int s2io_card_up(struct s2io_nic *nic); static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, - int bit_state); + int bit_state, bool may_sleep); static int s2io_add_isr(struct s2io_nic * sp); static void s2io_rem_isr(struct s2io_nic * sp); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index f5d48d7c4ce2..5162b938a1ac 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -871,11 +871,11 @@ static enum vxge_hw_status __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, struct vxge_hw_device_hw_info *hw_info) { + __be64 *serial_number = (void *)hw_info->serial_number; + __be64 *product_desc = (void *)hw_info->product_desc; + __be64 *part_number = (void *)hw_info->part_number; enum vxge_hw_status status; u64 data0, data1 = 0, steer_ctrl = 0; - u8 *serial_number = hw_info->serial_number; - u8 *part_number = hw_info->part_number; - u8 *product_desc = hw_info->product_desc; u32 i, j = 0; data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER; @@ -887,8 +887,8 @@ __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, if (status != VXGE_HW_OK) return status; - ((u64 *)serial_number)[0] = be64_to_cpu(data0); - ((u64 *)serial_number)[1] = be64_to_cpu(data1); + serial_number[0] = cpu_to_be64(data0); + serial_number[1] = cpu_to_be64(data1); data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER; data1 = steer_ctrl = 0; @@ -900,8 +900,8 @@ __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, if (status != VXGE_HW_OK) return status; - ((u64 *)part_number)[0] = be64_to_cpu(data0); - ((u64 *)part_number)[1] = be64_to_cpu(data1); + part_number[0] = cpu_to_be64(data0); + part_number[1] = cpu_to_be64(data1); for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0; i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) { @@ -915,8 +915,8 @@ __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath, if (status != VXGE_HW_OK) return status; - ((u64 *)product_desc)[j++] = be64_to_cpu(data0); - ((u64 *)product_desc)[j++] = be64_to_cpu(data1); + product_desc[j++] = cpu_to_be64(data0); + product_desc[j++] = 
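
The vxge hunk here is an endianness-annotation fix rather than a behavioural one: the card returns the serial number, part number and product description as big-endian 64-bit words, and storing them through a __be64 pointer with cpu_to_be64() produces exactly the same bytes as the old be64_to_cpu()-into-u64 code on every architecture, while now type-checking cleanly under sparse. In isolation:

#include <linux/types.h>
#include <asm/byteorder.h>

/* dst is a byte buffer (e.g. an ASCII string) filled from two
 * big-endian register words
 */
static void store_be_words(void *dst, u64 data0, u64 data1)
{
        __be64 *p = dst;

        p[0] = cpu_to_be64(data0);
        p[1] = cpu_to_be64(data1);
}
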
cpu_to_be64(data1); } return status; @@ -1121,7 +1121,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) list_for_each_safe(p, n, &blockpool->free_entry_list) { list_del(&((struct __vxge_hw_blockpool_entry *)p)->item); - kfree((void *)p); + kfree(p); } return; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/fw.h b/drivers/net/ethernet/netronome/nfp/crypto/fw.h index 8d1458896bcb..dcb67c2b5e5e 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/fw.h +++ b/drivers/net/ethernet/netronome/nfp/crypto/fw.h @@ -40,7 +40,7 @@ struct nfp_crypto_req_add_front { __be16 ipver_vlan __packed; u8 l4_proto; #define NFP_NET_TLS_NON_ADDR_KEY_LEN 8 - u8 l3_addrs[0]; + u8 l3_addrs[]; }; struct nfp_crypto_req_add_back { diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c index 76c51da5b66f..84d66d138c3d 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c @@ -295,8 +295,8 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk, ipv6 = true; break; } -#endif fallthrough; +#endif case AF_INET: req_sz = sizeof(struct nfp_crypto_req_add_v4); ipv6 = false; @@ -492,7 +492,7 @@ int nfp_net_tls_rx_resync_req(struct net_device *netdev, goto err_cnt_ign; } - switch (iph->version) { + switch (ipv6h->version) { case 4: sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 97d2b03208de..713ee3041d49 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -333,7 +333,7 @@ nfp_devlink_flash_update(struct devlink *devlink, struct devlink_flash_update_params *params, struct netlink_ext_ack *extack) { - return nfp_flash_update_common(devlink_priv(devlink), params->file_name, extack); + return nfp_flash_update_common(devlink_priv(devlink), params->fw, extack); } const struct devlink_ops nfp_devlink_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 7ff2ccbd43b0..742a420152b3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -301,11 +301,10 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } -int nfp_flash_update_common(struct nfp_pf *pf, const char *path, +int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw, struct netlink_ext_ack *extack) { struct device *dev = &pf->pdev->dev; - const struct firmware *fw; struct nfp_nsp *nsp; int err; @@ -319,24 +318,12 @@ int nfp_flash_update_common(struct nfp_pf *pf, const char *path, return err; } - err = request_firmware_direct(&fw, path, dev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "unable to read flash file from disk"); - goto exit_close_nsp; - } - - dev_info(dev, "Please be patient while writing flash image: %s\n", - path); - err = nfp_nsp_write_flash(nsp, fw); if (err < 0) - goto exit_release_fw; + goto exit_close_nsp; dev_info(dev, "Finished writing flash image\n"); err = 0; -exit_release_fw: - release_firmware(fw); exit_close_nsp: nfp_nsp_close(nsp); return err; @@ -724,10 +711,8 @@ static int nfp_pci_probe(struct pci_dev *pdev, } pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev); - if (IS_ERR_OR_NULL(pf->cpp)) { + if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); - if (err >= 0) - 
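
Both nfp here and ionic further down shrink their flash handlers because devlink core now loads the firmware itself and hands drivers a ready-made struct firmware in params->fw; the driver no longer calls request_firmware()/release_firmware(). The resulting driver-side shape, sketched with hypothetical names (my_pf, my_write_flash):

#include <linux/firmware.h>
#include <net/devlink.h>

struct my_pf;
int my_write_flash(struct my_pf *pf, const struct firmware *fw,
                   struct netlink_ext_ack *extack);

static int example_flash_update(struct devlink *devlink,
                                struct devlink_flash_update_params *params,
                                struct netlink_ext_ack *extack)
{
        struct my_pf *pf = devlink_priv(devlink);       /* driver private */

        /* params->fw is already requested and validated by devlink core;
         * the driver must neither request nor release it
         */
        return my_write_flash(pf, params->fw, extack);
}
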
err = -ENOMEM; goto err_disable_msix; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index fa6b13a05941..a7dede946a33 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -166,7 +166,7 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length); -int nfp_flash_update_common(struct nfp_pf *pf, const char *path, +int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw, struct netlink_ext_ack *extack); enum nfp_dump_diag { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 437226866ce8..f21fb573ea3e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2533,7 +2533,7 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) if (dp->netdev) { err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, - rx_ring->idx); + rx_ring->idx, rx_ring->r_vec->napi.napi_id); if (err < 0) return err; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index f18e787fa9ad..10e7d8b21c46 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -1070,7 +1070,7 @@ int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index, __le16 offset; __le16 readlen; u8 eth_index; - u8 data[0]; + u8 data[]; } __packed *buf; int bufsz, ret; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 2fc10a36afa4..8724d6a9ed02 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1043,8 +1043,7 @@ static int using_multi_irqs(struct net_device *dev) struct fe_priv *np = get_nvpriv(dev); if (!(np->msi_flags & NV_MSI_X_ENABLED) || - ((np->msi_flags & NV_MSI_X_ENABLED) && - ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) + ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)) return 0; else return 1; @@ -1666,11 +1665,7 @@ static void nv_update_stats(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); - /* If it happens that this is run in top-half context, then - * replace the spin_lock of hwstats_lock with - * spin_lock_irqsave() in calling functions. 
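
The forcedeth hunk just below swaps a WARN_ONCE(in_irq(), ...) plus assert_spin_locked() pair for lockdep_assert_held(), the preferred way to document a locking contract: it verifies that the current context actually holds the lock (assert_spin_locked() only checks that somebody does) and compiles away entirely when lockdep is disabled. Usage is a single line:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);

static void update_stats(void)
{
        lockdep_assert_held(&stats_lock);       /* callers must hold stats_lock */
        /* ...read and accumulate hardware counters... */
}
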
*/ - WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half"); - assert_spin_locked(&np->hwstats_lock); + lockdep_assert_held(&np->hwstats_lock); /* query hardware */ np->estats.tx_bytes += readl(base + NvRegTxCnt); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index ade8c44c01cd..140cee7c459d 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1816,7 +1816,8 @@ void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter, pch_gbe_clean_tx_ring(adapter, tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; - pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); tx_ring->desc = NULL; } @@ -1833,7 +1834,8 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, pch_gbe_clean_rx_ring(adapter, rx_ring); vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; - pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); rx_ring->desc = NULL; } @@ -1954,8 +1956,8 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter) pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); - pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size, - rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); + dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size, + rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic); rx_ring->rx_buff_pool_logic = 0; rx_ring->rx_buff_pool_size = 0; rx_ring->rx_buff_pool = NULL; @@ -2412,7 +2414,6 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_hw *hw = &adapter->hw; u32 wufc = adapter->wake_up_evt; - int retval = 0; netif_device_detach(netdev); if (netif_running(netdev)) @@ -2432,7 +2433,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev) pch_gbe_mac_set_wol_event(hw, wufc); pci_disable_device(pdev); } - return retval; + return 0; } #ifdef CONFIG_PM @@ -2503,17 +2504,11 @@ static int pch_gbe_probe(struct pci_dev *pdev, if (ret) return ret; - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) - || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { - ret = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); - if (ret) { - dev_err(&pdev->dev, "ERR: No usable DMA " - "configuration, aborting\n"); - return ret; - } + dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n"); + return ret; } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index dc5fbc2704f3..fb2b5bf179d7 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -25,7 +25,7 @@ static void ionic_watchdog_cb(struct timer_list *t) hb = ionic_heartbeat_check(ionic); if (hb >= 0) - ionic_link_status_check_request(ionic->lif, false); + ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP); } void ionic_init_devinfo(struct ionic *ionic) @@ -142,7 +142,7 @@ int ionic_heartbeat_check(struct ionic *ionic) work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) { - dev_err(ionic->dev, "%s OOM\n", 
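
pch_gbe's probe above also moves from the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pairs to the modern idiom: one dma_set_mask_and_coherent() call per width, trying 64-bit DMA first and falling back to 32-bit. Minimal form:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
        int err;

        /* prefer 64-bit DMA; fall back to 32-bit if the device or
         * platform cannot do it
         */
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
        }
        return err;
}
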
__func__); + dev_err(ionic->dev, "LIF reset trigger dropped\n"); } else { work->type = IONIC_DW_TYPE_LIF_RESET; if (fw_status & IONIC_FW_STS_F_RUNNING && diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 6c243b17312c..690768ff0143 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -12,8 +12,10 @@ #define IONIC_MAX_TX_DESC 8192 #define IONIC_MAX_RX_DESC 16384 -#define IONIC_MIN_TXRX_DESC 16 +#define IONIC_MIN_TXRX_DESC 64 #define IONIC_DEF_TXRX_DESC 4096 +#define IONIC_RX_FILL_THRESHOLD 16 +#define IONIC_RX_FILL_DIV 8 #define IONIC_LIFS_MAX 1024 #define IONIC_WATCHDOG_SECS 5 #define IONIC_ITR_COAL_USEC_DEFAULT 64 diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index 51d64718ed9f..b41301a5b0df 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -15,7 +15,7 @@ static int ionic_dl_flash_update(struct devlink *dl, { struct ionic *ionic = devlink_priv(dl); - return ionic_firmware_update(ionic->lif, params->file_name, extack); + return ionic_firmware_update(ionic->lif, params->fw, extack); } static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h index 5c01a9e306d8..0a77e8e810c5 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h @@ -6,7 +6,7 @@ #include <net/devlink.h> -int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name, +int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw, struct netlink_ext_ack *extack); struct ionic *ionic_devlink_alloc(struct device *dev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 35c72d4a78b3..0832bedcb3b4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -738,16 +738,11 @@ static int ionic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct ionic_lif *lif = netdev_priv(netdev); - int err; if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - err = ionic_lif_rss_config(lif, lif->rss_types, key, indir); - if (err) - return err; - - return 0; + return ionic_lif_rss_config(lif, lif->rss_types, key, indir); } static int ionic_set_tunable(struct net_device *dev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c index d7bbf336c6f6..5f40324cd243 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c @@ -91,7 +91,7 @@ static int ionic_fw_status_long_wait(struct ionic *ionic, return err; } -int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name, +int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw, struct netlink_ext_ack *extack) { struct ionic_dev *idev = &lif->ionic->idev; @@ -99,24 +99,16 @@ int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name, struct ionic *ionic = lif->ionic; union ionic_dev_cmd_comp comp; u32 buf_sz, copy_sz, offset; - const struct firmware *fw; struct devlink *dl; int next_interval; int err = 0; u8 fw_slot; - netdev_info(netdev, 
"Installing firmware %s\n", fw_name); + netdev_info(netdev, "Installing firmware\n"); dl = priv_to_devlink(ionic); - devlink_flash_update_begin_notify(dl); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); - err = request_firmware(&fw, fw_name, ionic->dev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to find firmware file"); - goto err_out; - } - buf_sz = sizeof(idev->dev_cmd_regs->data); netdev_dbg(netdev, @@ -200,7 +192,5 @@ err_out: devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0); else devlink_flash_update_status_notify(dl, "Flash done", NULL, 0, 0); - release_firmware(fw); - devlink_flash_update_end_notify(dl); return err; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index a12df3946a07..11140915c2da 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ +#include <linux/ethtool.h> #include <linux/printk.h> #include <linux/dynamic_debug.h> #include <linux/netdevice.h> @@ -123,6 +124,12 @@ static void ionic_link_status_check(struct ionic_lif *lif) link_up = link_status == IONIC_PORT_OPER_STATUS_UP; if (link_up) { + if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { + mutex_lock(&lif->queue_lock); + ionic_start_queues(lif); + mutex_unlock(&lif->queue_lock); + } + if (!netif_carrier_ok(netdev)) { u32 link_speed; @@ -132,12 +139,6 @@ static void ionic_link_status_check(struct ionic_lif *lif) link_speed / 1000); netif_carrier_on(netdev); } - - if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) { - mutex_lock(&lif->queue_lock); - ionic_start_queues(lif); - mutex_unlock(&lif->queue_lock); - } } else { if (netif_carrier_ok(netdev)) { netdev_info(netdev, "Link down\n"); @@ -841,7 +842,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, case IONIC_EVENT_RESET: work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) { - netdev_err(lif->netdev, "%s OOM\n", __func__); + netdev_err(lif->netdev, "Reset event dropped\n"); } else { work->type = IONIC_DW_TYPE_LIF_RESET; ionic_lif_deferred_enqueue(&lif->deferred, work); @@ -1050,10 +1051,8 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, if (!can_sleep) { work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "%s OOM\n", __func__); + if (!work) return -ENOMEM; - } work->type = add ? 
IONIC_DW_TYPE_RX_ADDR_ADD : IONIC_DW_TYPE_RX_ADDR_DEL; memcpy(work->addr, addr, ETH_ALEN); @@ -1074,22 +1073,22 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add, static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, true, true); + return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP); } static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, true, false); + return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP); } static int ionic_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, false, true); + return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP); } static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, false, false); + return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP); } static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) @@ -1129,38 +1128,10 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode) lif->rx_mode = rx_mode; } -static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode, - bool from_ndo) -{ - struct ionic_deferred_work *work; - - if (from_ndo) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "%s OOM\n", __func__); - return; - } - work->type = IONIC_DW_TYPE_RX_MODE; - work->rx_mode = rx_mode; - netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - ionic_lif_rx_mode(lif, rx_mode); - } -} - -static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo) -{ - if (from_ndo) - __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); - else - __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); - -} - -static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) +static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) { struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_deferred_work *work; unsigned int nfilters; unsigned int rx_mode; @@ -1177,7 +1148,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) * we remove our overflow flag and check the netdev flags * to see if we can disable NIC PROMISC */ - ionic_dev_uc_sync(netdev, from_ndo); + if (can_sleep) + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + else + __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); if (netdev_uc_count(netdev) + 1 > nfilters) { rx_mode |= IONIC_RX_MODE_F_PROMISC; @@ -1189,7 +1163,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) } /* same for multicast */ - ionic_dev_uc_sync(netdev, from_ndo); + if (can_sleep) + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); + else + __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); if (netdev_mc_count(netdev) > nfilters) { rx_mode |= IONIC_RX_MODE_F_ALLMULTI; @@ -1200,13 +1177,26 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo) rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; } - if (lif->rx_mode != rx_mode) - _ionic_lif_rx_mode(lif, rx_mode, from_ndo); + if (lif->rx_mode != rx_mode) { + if (!can_sleep) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + 
netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + work->rx_mode = rx_mode; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } else { + ionic_lif_rx_mode(lif, rx_mode); + } + } } static void ionic_ndo_set_rx_mode(struct net_device *netdev) { - ionic_set_rx_mode(netdev, true); + ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -1475,12 +1465,14 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) if (err) return err; - netdev->mtu = new_mtu; /* if we're not running, nothing more to do */ - if (!netif_running(netdev)) + if (!netif_running(netdev)) { + netdev->mtu = new_mtu; return 0; + } ionic_stop_queues_reconfig(lif); + netdev->mtu = new_mtu; return ionic_start_queues_reconfig(lif); } @@ -1625,6 +1617,24 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif) ionic_lif_rss_config(lif, 0x0, NULL, NULL); } +static void ionic_lif_quiesce(struct ionic_lif *lif) +{ + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.lif_setattr = { + .opcode = IONIC_CMD_LIF_SETATTR, + .index = cpu_to_le16(lif->index), + .attr = IONIC_LIF_ATTR_STATE, + .state = IONIC_LIF_QUIESCE, + }, + }; + int err; + + err = ionic_adminq_post_wait(lif, &ctx); + if (err) + netdev_err(lif->netdev, "lif quiesce failed %d\n", err); +} + static void ionic_txrx_disable(struct ionic_lif *lif) { unsigned int i; @@ -1639,6 +1649,8 @@ static void ionic_txrx_disable(struct ionic_lif *lif) for (i = 0; i < lif->nxqs; i++) err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT)); } + + ionic_lif_quiesce(lif); } static void ionic_txrx_deinit(struct ionic_lif *lif) @@ -1773,7 +1785,7 @@ static int ionic_txrx_init(struct ionic_lif *lif) if (lif->netdev->features & NETIF_F_RXHASH) ionic_lif_rss_init(lif); - ionic_set_rx_mode(lif->netdev, false); + ionic_set_rx_mode(lif->netdev, CAN_SLEEP); return 0; @@ -2781,7 +2793,7 @@ static int ionic_station_set(struct ionic_lif *lif) */ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) - ionic_lif_addr(lif, netdev->dev_addr, true, true); + ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); } else { /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); @@ -2798,7 +2810,7 @@ static int ionic_station_set(struct ionic_lif *lif) netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, true, true); + ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP); return 0; } @@ -2959,6 +2971,8 @@ int ionic_lif_register(struct ionic_lif *lif) dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); return err; } + + ionic_link_status_check_request(lif, true); lif->registered = true; ionic_lif_set_netdev_info(lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 0224dfd24b8a..9bed42719ae5 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -13,6 +13,12 @@ #define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) #define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) + +#define ADD_ADDR true +#define DEL_ADDR false +#define CAN_SLEEP true +#define CAN_NOT_SLEEP false + #define IONIC_RX_COPYBREAK_DEFAULT 256 #define IONIC_TX_BUDGET_DEFAULT 256 diff --git 
a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index d355676f6c16..fbc57de6683e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -511,10 +511,8 @@ int ionic_port_init(struct ionic *ionic) idev->port_info_sz, &idev->port_info_pa, GFP_KERNEL); - if (!idev->port_info) { - dev_err(ionic->dev, "Failed to allocate port info\n"); + if (!idev->port_info) return -ENOMEM; - } } sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data)); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index ff20a2ac4c2f..6ae75b771a15 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */ +#include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/netdevice.h> diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index b3d2250c77d0..9156c9825a16 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -392,11 +392,6 @@ void ionic_rx_fill(struct ionic_queue *q) q->dbval | q->head_idx); } -static void ionic_rx_fill_cb(void *arg) -{ - ionic_rx_fill(arg); -} - void ionic_rx_empty(struct ionic_queue *q) { struct ionic_desc_info *desc_info; @@ -480,6 +475,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) struct ionic_cq *cq = napi_to_cq(napi); struct ionic_dev *idev; struct ionic_lif *lif; + u16 rx_fill_threshold; u32 work_done = 0; u32 flags = 0; @@ -489,7 +485,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done = ionic_cq_service(cq, budget, ionic_rx_service, NULL, NULL); - if (work_done) + rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD, + cq->num_descs / IONIC_RX_FILL_DIV); + if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold) ionic_rx_fill(cq->bound_q); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -518,6 +516,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) struct ionic_dev *idev; struct ionic_lif *lif; struct ionic_cq *txcq; + u16 rx_fill_threshold; u32 rx_work_done = 0; u32 tx_work_done = 0; u32 flags = 0; @@ -531,8 +530,11 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) rx_work_done = ionic_cq_service(rxcq, budget, ionic_rx_service, NULL, NULL); - if (rx_work_done) - ionic_rx_fill_cb(rxcq->bound_q); + + rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD, + rxcq->num_descs / IONIC_RX_FILL_DIV); + if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold) + ionic_rx_fill(rxcq->bound_q); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { ionic_dim_update(qcq); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 05e3a3b60269..9cf960a6d007 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1762,7 +1762,7 @@ static void qede_init_fp(struct qede_dev *edev) /* Driver have no error path from here */ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, - fp->rxq->rxq_id) < 0); + fp->rxq->rxq_id, 0) < 0); if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, MEM_TYPE_PAGE_ORDER0, diff --git 
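
The ionic napi changes above stop refilling the rx ring on every poll: descriptors are only posted once at least IONIC_RX_FILL_THRESHOLD (16) slots are free, scaled down to an eighth of the ring for very small rings, so descriptor writes and doorbells are batched rather than issued per packet. The guard condition, abstracted:

#include <linux/kernel.h>
#include <linux/types.h>

#define RX_FILL_THRESHOLD       16      /* IONIC_RX_FILL_THRESHOLD */
#define RX_FILL_DIV             8       /* IONIC_RX_FILL_DIV */

/* refill only when enough slots have opened up to make the doorbell
 * write worthwhile; tiny rings get a proportionally smaller bar
 */
static bool should_refill(u16 space_avail, u16 num_descs)
{
        u16 threshold = min_t(u16, RX_FILL_THRESHOLD,
                              num_descs / RX_FILL_DIV);

        return space_avail >= threshold;
}

The same series raises IONIC_MIN_TXRX_DESC from 16 to 64 in ionic_dev.h above, presumably to leave adequate refill slack on the smallest rings.
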
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 30e52f969759..dd03be3fc82a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -2112,7 +2112,6 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - int retval; netif_device_detach(netdev); qlcnic_cancel_idc_work(adapter); @@ -2125,11 +2124,7 @@ static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) qlcnic_83xx_disable_mbx_intr(adapter); cancel_delayed_work_sync(&adapter->idc_aen_work); - retval = pci_save_state(pdev); - if (retval) - return retval; - - return 0; + return pci_save_state(pdev); } static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index fcdecddb2812..8d51b0cb545c 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -26,7 +26,7 @@ static int rmnet_is_real_dev_registered(const struct net_device *real_dev) } /* Needs rtnl lock */ -static struct rmnet_port* +struct rmnet_port* rmnet_get_port_rtnl(const struct net_device *real_dev) { return rtnl_dereference(real_dev->rx_handler_data); @@ -253,7 +253,10 @@ static int rmnet_config_notify_cb(struct notifier_block *nb, netdev_dbg(real_dev, "Kernel unregister\n"); rmnet_force_unassociate_device(real_dev); break; - + case NETDEV_CHANGEMTU: + if (rmnet_vnd_validate_real_dev_mtu(real_dev)) + return NOTIFY_BAD; + break; default: break; } @@ -329,9 +332,17 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_RMNET_FLAGS]) { struct ifla_rmnet_flags *flags; + u32 old_data_format; + old_data_format = port->data_format; flags = nla_data(data[IFLA_RMNET_FLAGS]); port->data_format = flags->flags & flags->mask; + + if (rmnet_vnd_update_dev_mtu(port, real_dev)) { + port->data_format = old_data_format; + NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev"); + return -EINVAL; + } } return 0; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index be515982d628..8d8d4690a074 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -73,4 +73,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev, struct netlink_ext_ack *extack); int rmnet_del_bridge(struct net_device *rmnet_dev, struct net_device *slave_dev); +struct rmnet_port* +rmnet_get_port_rtnl(const struct net_device *real_dev); #endif /* _RMNET_CONFIG_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index d58b51d277f1..41fbd2ceeede 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -5,6 +5,7 @@ */ #include <linux/etherdevice.h> +#include <linux/ethtool.h> #include <linux/if_arp.h> #include <net/pkt_sched.h> #include "rmnet_config.h" @@ -58,9 +59,30 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int rmnet_vnd_headroom(struct rmnet_port *port) +{ + u32 headroom; + + headroom = sizeof(struct rmnet_map_header); + + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) + headroom += sizeof(struct rmnet_map_ul_csum_header); + + return 
headroom; +} + static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) { - if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct rmnet_port *port; + u32 headroom; + + port = rmnet_get_port_rtnl(priv->real_dev); + + headroom = rmnet_vnd_headroom(port); + + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE || + new_mtu > (priv->real_dev->mtu - headroom)) return -EINVAL; rmnet_dev->mtu = new_mtu; @@ -229,6 +251,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, { struct rmnet_priv *priv = netdev_priv(rmnet_dev); + u32 headroom; int rc; if (rmnet_get_endpoint(port, id)) { @@ -242,6 +265,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, priv->real_dev = real_dev; + headroom = rmnet_vnd_headroom(port); + + if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev"); + return -EINVAL; + } + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; @@ -283,3 +313,45 @@ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) return 0; } + +int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev) +{ + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + struct rmnet_port *port; + unsigned long bkt_ep; + u32 headroom; + + port = rmnet_get_port_rtnl(real_dev); + + headroom = rmnet_vnd_headroom(port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + if (ep->egress_dev->mtu > (real_dev->mtu - headroom)) + return -1; + } + + return 0; +} + +int rmnet_vnd_update_dev_mtu(struct rmnet_port *port, + struct net_device *real_dev) +{ + struct hlist_node *tmp_ep; + struct rmnet_endpoint *ep; + unsigned long bkt_ep; + u32 headroom; + + headroom = rmnet_vnd_headroom(port); + + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) { + if (ep->egress_dev->mtu <= (real_dev->mtu - headroom)) + continue; + + if (rmnet_vnd_change_mtu(ep->egress_dev, + real_dev->mtu - headroom)) + return -1; + } + + return 0; +}
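
All three rmnet hunks above enforce one invariant: a muxed vnd's MTU plus the MAP headroom must fit inside the real device's MTU, whether the trigger is newlink, changelink, or an MTU change on the real device. Below is a minimal userspace model of that rule; the header sizes are illustrative stand-ins for the kernel structs, not authoritative values.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAP_HDR_LEN           4  /* stand-in for sizeof(struct rmnet_map_header) */
    #define UL_CSUM_HDR_LEN       4  /* stand-in for sizeof(struct rmnet_map_ul_csum_header) */
    #define RMNET_MAX_PACKET_SIZE 65535

    static unsigned int map_headroom(bool egress_csum)
    {
        /* every egress frame gets a MAP header; csum offload adds one more */
        return MAP_HDR_LEN + (egress_csum ? UL_CSUM_HDR_LEN : 0);
    }

    static bool vnd_mtu_ok(int new_mtu, int real_dev_mtu, bool egress_csum)
    {
        /* vnd MTU plus MAP overhead must fit in the real device's MTU */
        return new_mtu >= 0 &&
               new_mtu <= RMNET_MAX_PACKET_SIZE &&
               new_mtu <= real_dev_mtu - (int)map_headroom(egress_csum);
    }

    int main(void)
    {
        printf("1500 on a 1500-byte real dev: %s\n",
               vnd_mtu_ok(1500, 1500, true) ? "ok" : "rejected");
        printf("1492 on a 1500-byte real dev: %s\n",
               vnd_mtu_ok(1492, 1500, true) ? "ok" : "rejected");
        return 0;
    }

On a 1500-byte real device with egress checksum offload on, the model rejects 1500 and accepts 1492, mirroring the -EINVAL paths in the hunks above.
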
\ No newline at end of file diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index 4967f3461ed1..dc3a4443ef0a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -18,4 +18,7 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_setup(struct net_device *dev); +int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev); +int rmnet_vnd_update_dev_mtu(struct rmnet_port *port, + struct net_device *real_dev); #endif /* _RMNET_VND_H_ */ diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 85d9c3e30c69..46d8510b2fe2 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -67,8 +67,8 @@ #define R8169_REGS_SIZE 256 #define R8169_RX_BUF_SIZE (SZ_16K - 1) -#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ -#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) @@ -584,12 +584,6 @@ enum rtl_flag { RTL_FLAG_MAX }; -struct rtl8169_stats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; @@ -600,8 +594,6 @@ struct rtl8169_private { u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ u32 dirty_tx; - struct rtl8169_stats rx_stats; - struct rtl8169_stats tx_stats; struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ dma_addr_t TxPhyAddr; @@ -700,27 +692,6 @@ static bool rtl_supports_eee(struct rtl8169_private *tp) tp->mac_version != RTL_GIGA_MAC_VER_39; } -static void rtl_get_priv_stats(struct rtl8169_stats *stats, - u64 *pkts, u64 *bytes) -{ - unsigned int start; - - do { - start = u64_stats_fetch_begin_irq(&stats->syncp); - *pkts = stats->packets; - *bytes = stats->bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); -} - -static void rtl_inc_priv_stats(struct rtl8169_stats *stats, - u64 pkts, u64 bytes) -{ - u64_stats_update_begin(&stats->syncp); - stats->packets += pkts; - stats->bytes += bytes; - u64_stats_update_end(&stats->syncp); -} - static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg) { int i; @@ -1591,16 +1562,6 @@ static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } -static void rtl8169_reset_counters(struct rtl8169_private *tp) -{ - /* - * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the - * tally counters. 
- */ - if (tp->mac_version >= RTL_GIGA_MAC_VER_19) - rtl8169_do_counters(tp, CounterReset); -} - static void rtl8169_update_counters(struct rtl8169_private *tp) { u8 val = RTL_R8(tp, ChipCmd); @@ -1635,13 +1596,16 @@ static void rtl8169_init_counter_offsets(struct rtl8169_private *tp) if (tp->tc_offset.inited) return; - rtl8169_reset_counters(tp); - rtl8169_update_counters(tp); + if (tp->mac_version >= RTL_GIGA_MAC_VER_19) { + rtl8169_do_counters(tp, CounterReset); + } else { + rtl8169_update_counters(tp); + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.rx_missed = counters->rx_missed; + } - tp->tc_offset.tx_errors = counters->tx_errors; - tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; - tp->tc_offset.tx_aborted = counters->tx_aborted; - tp->tc_offset.rx_missed = counters->rx_missed; tp->tc_offset.inited = true; } @@ -3880,7 +3844,7 @@ static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp, static void rtl8169_rx_clear(struct rtl8169_private *tp) { - unsigned int i; + int i; for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) { dma_unmap_page(tp_to_dev(tp), @@ -3895,7 +3859,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) static int rtl8169_rx_fill(struct rtl8169_private *tp) { - unsigned int i; + int i; for (i = 0; i < NUM_RX_DESC; i++) { struct page *data; @@ -4170,13 +4134,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return true; } -static bool rtl_tx_slots_avail(struct rtl8169_private *tp, - unsigned int nr_frags) +static bool rtl_tx_slots_avail(struct rtl8169_private *tp) { - unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC + - READ_ONCE(tp->cur_tx); /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ - return slots_avail > nr_frags; + return slots_avail > MAX_SKB_FRAGS; } /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ @@ -4209,17 +4173,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, bool stop_queue, door_bell; u32 opts[2]; - txd_first = tp->TxDescArray + entry; - - if (unlikely(!rtl_tx_slots_avail(tp, frags))) { + if (unlikely(!rtl_tx_slots_avail(tp))) { if (net_ratelimit()) netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); goto err_stop_0; } - if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn)) - goto err_stop_0; - opts[1] = rtl8169_tx_vlan_tag(skb); opts[0] = 0; @@ -4232,6 +4191,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, entry, false))) goto err_dma_0; + txd_first = tp->TxDescArray + entry; + if (frags) { if (rtl8169_xmit_frags(tp, skb, opts, entry)) goto err_dma_1; @@ -4254,22 +4215,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, /* rtl_tx needs to see descriptor changes before updated tp->cur_tx */ smp_wmb(); - tp->cur_tx += frags + 1; + WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); - stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + stop_queue = !rtl_tx_slots_avail(tp); if (unlikely(stop_queue)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ smp_wmb(); netif_stop_queue(dev); - door_bell = true; - } - - if (door_bell) - rtl8169_doorbell(tp); - - if (unlikely(stop_queue)) { /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). 
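
The rtl_tx_slots_avail() rewrite above treats cur_tx and dirty_tx as free-running counters: unsigned subtraction yields the number of in-flight descriptors even across u32 wraparound, and always reserving MAX_SKB_FRAGS + 1 free slots lets the helper drop its per-skb nr_frags argument. A userspace sketch of just that arithmetic (the MAX_SKB_FRAGS value here is an assumption, not the kernel's definition):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_DESC   256u
    #define MAX_SKB_FRAGS 17u  /* illustrative; the kernel derives its own value */

    static uint32_t cur_tx, dirty_tx;  /* producer / consumer positions */

    static bool tx_slots_avail(void)
    {
        /* unsigned math handles wraparound of the free-running counters */
        uint32_t slots_avail = dirty_tx + NUM_TX_DESC - cur_tx;

        /* a worst-case skb needs MAX_SKB_FRAGS + 1 descriptors */
        return slots_avail > MAX_SKB_FRAGS;
    }

    int main(void)
    {
        cur_tx = UINT32_MAX - 3;   /* about to wrap */
        dirty_tx = cur_tx - 200;   /* 200 descriptors in flight */
        printf("in flight: %u, usable: %s\n",
               (unsigned)(cur_tx - dirty_tx),
               tx_slots_avail() ? "yes" : "no");
        return 0;
    }

Because both counters only ever increase, no masking is needed until a slot is actually addressed (entry = cur_tx % NUM_TX_DESC in the driver above).
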
@@ -4277,11 +4231,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, * status and forget to wake up queue, a racing rtl_tx thread * can't. */ - smp_mb(); - if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) + smp_mb__after_atomic(); + if (rtl_tx_slots_avail(tp)) netif_start_queue(dev); + door_bell = true; } + if (door_bell) + rtl8169_doorbell(tp); + return NETDEV_TX_OK; err_dma_1: @@ -4390,20 +4348,20 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, int budget) { - unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0; + unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0; + struct sk_buff *skb; dirty_tx = tp->dirty_tx; - smp_rmb(); - for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) { + while (READ_ONCE(tp->cur_tx) != dirty_tx) { unsigned int entry = dirty_tx % NUM_TX_DESC; - struct sk_buff *skb = tp->tx_skb[entry].skb; u32 status; status = le32_to_cpu(tp->TxDescArray[entry].opts1); if (status & DescOwn) break; + skb = tp->tx_skb[entry].skb; rtl8169_unmap_tx_skb(tp, entry); if (skb) { @@ -4416,10 +4374,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, if (tp->dirty_tx != dirty_tx) { netdev_completed_queue(dev, pkts_compl, bytes_compl); + dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl); - rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl); - - tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) * - refresh cur_tx ring index and queue status (read barrier) @@ -4427,18 +4383,18 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, * a racing xmit thread can only have a right view of the * ring status. */ - smp_mb(); - if (netif_queue_stopped(dev) && - rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + smp_store_mb(tp->dirty_tx, dirty_tx); + if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp)) netif_wake_queue(dev); - } /* * 8168 hack: TxPoll requests are lost when the Tx packets are * too close. Let's kick an extra TxPoll request when a burst * of start_xmit activity is detected (if it is not detected, * it is slow enough). -- FR + * If skb is NULL then we come here again once a tx irq is + * triggered after the last fragment is marked transmitted. 
*/ - if (tp->cur_tx != dirty_tx) + if (tp->cur_tx != dirty_tx && skb) rtl8169_doorbell(tp); } } @@ -4459,15 +4415,13 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) skb_checksum_none_assert(skb); } -static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget) { - unsigned int cur_rx, rx_left, count; struct device *d = tp_to_dev(tp); + int count; - cur_rx = tp->cur_rx; - - for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { - unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC; + for (count = 0; count < budget; count++, tp->cur_rx++) { + unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescArray + entry; struct sk_buff *skb; const void *rx_buf; @@ -4539,14 +4493,11 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget napi_gro_receive(&tp->napi, skb); - rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size); + dev_sw_netstats_rx_add(dev, pkt_size); release_descriptor: rtl8169_mark_to_asic(desc); } - count = cur_rx - tp->cur_rx; - tp->cur_rx = cur_rx; - return count; } @@ -4605,7 +4556,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) struct net_device *dev = tp->dev; int work_done; - work_done = rtl_rx(dev, tp, (u32) budget); + work_done = rtl_rx(dev, tp, budget); rtl_tx(dev, tp, budget); @@ -4721,6 +4672,7 @@ static int rtl_open(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; + unsigned long irqflags; int retval = -ENOMEM; pm_runtime_get_sync(&pdev->dev); @@ -4732,7 +4684,7 @@ static int rtl_open(struct net_device *dev) tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, &tp->TxPhyAddr, GFP_KERNEL); if (!tp->TxDescArray) - goto err_pm_runtime_put; + goto out; tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, &tp->RxPhyAddr, GFP_KERNEL); @@ -4745,8 +4697,9 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); + irqflags = pci_dev_msi_enabled(pdev) ? 
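/* An MSI(-X) vector is private to this device, so the handler can be
 * flagged IRQF_NO_THREAD and skip forced threading; a legacy INTx line
 * may be shared with other devices and must remain IRQF_SHARED.
 */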
IRQF_NO_THREAD : IRQF_SHARED; retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt, - IRQF_SHARED, dev->name, tp); + irqflags, dev->name, tp); if (retval < 0) goto err_release_fw_2; @@ -4757,9 +4710,9 @@ static int rtl_open(struct net_device *dev) rtl8169_up(tp); rtl8169_init_counter_offsets(tp); netif_start_queue(dev); - - pm_runtime_put_sync(&pdev->dev); out: + pm_runtime_put_sync(&pdev->dev); + return retval; err_free_irq: @@ -4775,8 +4728,6 @@ err_free_tx_0: dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, tp->TxPhyAddr); tp->TxDescArray = NULL; -err_pm_runtime_put: - pm_runtime_put_noidle(&pdev->dev); goto out; } @@ -4790,9 +4741,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) pm_runtime_get_noresume(&pdev->dev); netdev_stats_to_stats64(stats, &dev->stats); - - rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes); - rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes); + dev_fetch_sw_netstats(stats, dev->tstats); /* * Fetch additional counter values missing in stats collected by driver @@ -5204,8 +5153,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp) if (rc == -ENOENT) /* clk-core allows NULL (for suspend / resume) */ rc = 0; - else if (rc != -EPROBE_DEFER) - dev_err(d, "failed to get clk: %d\n", rc); + else + dev_err_probe(d, rc, "failed to get clk\n"); } else { tp->clk = clk; rc = clk_prepare_enable(clk); @@ -5263,6 +5212,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; + dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, + struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + /* Get the *optional* external "ether_clk" used on some boards */ rc = rtl_get_ether_clk(tp); if (rc) @@ -5340,8 +5294,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } INIT_WORK(&tp->wk.work, rtl_task); - u64_stats_init(&tp->rx_stats.syncp); - u64_stats_init(&tp->tx_stats.syncp); rtl_init_mac_address(tp); diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 2590cab53e54..1f981dfe4bdc 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -285,7 +285,13 @@ typedef union efx_oword { field10, value10, \ field11, value11, \ field12, value12, \ - field13, value13) \ + field13, value13, \ + field14, value14, \ + field15, value15, \ + field16, value16, \ + field17, value17, \ + field18, value18, \ + field19, value19) \ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ @@ -298,7 +304,13 @@ typedef union efx_oword { EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \ - EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13))) + EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19))) #define EFX_INSERT_FIELDS64(...) 
\ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) @@ -340,7 +352,19 @@ typedef union efx_oword { #endif /* Populate an octword field with various numbers of arguments */ -#define EFX_POPULATE_OWORD_13 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_18(oword, ...) \ + EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_17(oword, ...) \ + EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_16(oword, ...) \ + EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_15(oword, ...) \ + EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_14(oword, ...) \ + EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_13(oword, ...) \ + EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_OWORD_12(oword, ...) \ EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_OWORD_11(oword, ...) \ @@ -375,7 +399,19 @@ typedef union efx_oword { EFX_DWORD_3, 0xffffffff) /* Populate a quadword field with various numbers of arguments */ -#define EFX_POPULATE_QWORD_13 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_18(qword, ...) \ + EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_17(qword, ...) \ + EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_16(qword, ...) \ + EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_15(qword, ...) \ + EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_14(qword, ...) \ + EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_13(qword, ...) \ + EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_QWORD_12(qword, ...) \ EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_QWORD_11(qword, ...) \ @@ -408,7 +444,19 @@ typedef union efx_oword { EFX_DWORD_1, 0xffffffff) /* Populate a dword field with various numbers of arguments */ -#define EFX_POPULATE_DWORD_13 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_18(dword, ...) \ + EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_17(dword, ...) \ + EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_16(dword, ...) \ + EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_15(dword, ...) \ + EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_14(dword, ...) \ + EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_13(dword, ...) \ + EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_DWORD_12(dword, ...) \ EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_DWORD_11(dword, ...) 
\ diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 3148fe770356..518268ce2064 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -182,8 +182,20 @@ static int efx_ef100_init_datapath_caps(struct efx_nic *efx) if (rc) return rc; - if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) - efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) { + struct net_device *net_dev = efx->net_dev; + netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; + + net_dev->features |= tso; + net_dev->hw_features |= tso; + net_dev->hw_enc_features |= tso; + /* EF100 HW can only offload outer checksums if they are UDP, + * so for GRE_CSUM we have to use GSO_PARTIAL. + */ + net_dev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + } efx->num_mac_stats = MCDI_WORD(outbuf, GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); netif_dbg(efx, probe, efx->net_dev, @@ -686,7 +698,7 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx, #define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \ NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \ - NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX) + NETIF_F_HW_VLAN_CTAG_TX) const struct efx_nic_type ef100_pf_nic_type = { .revision = EFX_REV_EF100, @@ -1101,6 +1113,9 @@ static int ef100_probe_main(struct efx_nic *efx) nic_data->efx = efx; net_dev->features |= efx->type->offload_features; net_dev->hw_features |= efx->type->offload_features; + net_dev->hw_enc_features |= efx->type->offload_features; + net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_HIGHDMA | NETIF_F_ALL_TSO; /* Populate design-parameter defaults */ nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT; diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index a90e5a9d2a37..26ef51d6b542 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -54,8 +54,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) struct efx_nic *efx = tx_queue->efx; struct ef100_nic_data *nic_data; struct efx_tx_buffer *buffer; - struct tcphdr *tcphdr; - struct iphdr *iphdr; size_t header_len; u32 mss; @@ -98,20 +96,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) buffer->unmap_len = 0; buffer->skb = skb; ++tx_queue->insert_count; - - /* Adjust the TCP checksum to exclude the total length, since we set - * ED_INNER_IP_LEN in the descriptor. - */ - tcphdr = tcp_hdr(skb); - if (skb_is_gso_v6(skb)) { - tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } else { - iphdr = ip_hdr(skb); - tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr, - 0, IPPROTO_TCP, 0); - } return true; } @@ -203,34 +187,66 @@ static void ef100_make_tso_desc(struct efx_nic *efx, struct efx_tx_buffer *buffer, efx_oword_t *txd, unsigned int segment_count) { - u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) || - skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ? - ESE_GZ_TX_DESC_IP4_ID_NO_OP : - ESE_GZ_TX_DESC_IP4_ID_INC_MOD16; - u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ? 
- skb_vlan_tag_present(skb) : 0; + bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL; unsigned int len, ip_offset, tcp_offset, payload_segs; + u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16; + unsigned int outer_ip_offset, outer_l4_offset; u16 vlan_tci = skb_vlan_tag_get(skb); u32 mss = skb_shinfo(skb)->gso_size; + bool encap = skb->encapsulation; + bool udp_encap = false; + u16 vlan_enable = 0; + struct tcphdr *tcp; + bool outer_csum; + u32 paylen; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID) + mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP; + if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX) + vlan_enable = skb_vlan_tag_present(skb); len = skb->len - buffer->len; /* We use 1 for the TSO descriptor and 1 for the header */ payload_segs = segment_count - 2; - ip_offset = skb_network_offset(skb); - tcp_offset = skb_transport_offset(skb); + if (encap) { + outer_ip_offset = skb_network_offset(skb); + outer_l4_offset = skb_transport_offset(skb); + ip_offset = skb_inner_network_offset(skb); + tcp_offset = skb_inner_transport_offset(skb); + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) + udp_encap = true; + } else { + ip_offset = skb_network_offset(skb); + tcp_offset = skb_transport_offset(skb); + outer_ip_offset = outer_l4_offset = 0; + } + outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM; + + /* subtract TCP payload length from inner checksum */ + tcp = (void *)skb->data + tcp_offset; + paylen = skb->len - tcp_offset; + csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen)); - EFX_POPULATE_OWORD_13(*txd, + EFX_POPULATE_OWORD_19(*txd, ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO, ESF_GZ_TX_TSO_MSS, mss, ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1, ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs, ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1, ESF_GZ_TX_TSO_PAYLOAD_LEN, len, + ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum, ESF_GZ_TX_TSO_CSO_INNER_L4, 1, ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1, ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1, ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid, ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1, + ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1, + ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1, + ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial, + ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial, + ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid : + ESE_GZ_TX_DESC_IP4_ID_NO_OP, ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable, ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci ); diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 19cf7cac1e6e..68fc7d317693 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -262,7 +262,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) /* Initialise XDP queue information */ rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev, - rx_queue->core_index); + rx_queue->core_index, 0); if (rc) { netif_err(efx, rx_err, efx->net_dev, diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index b77e427e6729..c52a38df0e0d 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -8,7 +8,7 @@ config NET_VENDOR_SMSC default y depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \ ISA || MAC || MIPS || NIOS2 || PCI || \ - PCMCIA || SUPERH || XTENSA || H8300 + PCMCIA || SUPERH || XTENSA || H8300 || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. 
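
Looking back at the ef100_tx.c hunk above: before handing a TSO skb to the hardware, the driver folds the L4 length back out of the stack-supplied TCP pseudo-header checksum via csum_replace_by_diff(). The sketch below reproduces that ones-complement subtraction in userspace; the addresses and lengths are made-up test values, not anything from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* fold a 32-bit ones-complement accumulator down to 16 bits */
    static uint16_t fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* ones-complement subtraction: a - b == a + ~b (mod 0xffff) */
    static uint16_t csum_sub(uint16_t a, uint16_t b)
    {
        return fold((uint32_t)a + (uint16_t)~b);
    }

    int main(void)
    {
        uint32_t saddr = 0xc0a80001, daddr = 0xc0a80002; /* 192.168.0.1/.2 */
        uint16_t proto = 6, len = 1460 + 20;             /* TCP hdr + payload */

        uint32_t with_len = (saddr >> 16) + (saddr & 0xffff) +
                            (daddr >> 16) + (daddr & 0xffff) + proto + len;
        uint32_t without  = (saddr >> 16) + (saddr & 0xffff) +
                            (daddr >> 16) + (daddr & 0xffff) + proto;

        printf("seed with len, len removed: %#06x\n", csum_sub(fold(with_len), len));
        printf("seed built without len:     %#06x\n", fold(without));
        return 0;
    }

Both lines print the same folded seed, which is exactly the property the driver relies on when it subtracts paylen from tcp->check.
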
@@ -39,7 +39,7 @@ config SMC91X select MII depends on !OF || GPIOLIB depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \ - MIPS || NIOS2 || SUPERH || XTENSA || H8300 + MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST help This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it @@ -78,7 +78,7 @@ config SMC911X tristate "SMSC LAN911[5678] support" select CRC32 select MII - depends on (ARM || SUPERH) + depends on (ARM || SUPERH || COMPILE_TEST) help This is a driver for SMSC's LAN911x series of Ethernet chipsets including the new LAN9115, LAN9116, LAN9117, and LAN9118. diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 01069dfaf75c..22cdbf12c823 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -102,7 +102,10 @@ MODULE_ALIAS("platform:smc911x"); #define PRINTK(dev, args...) netdev_info(dev, args) #else -#define DBG(n, dev, args...) do { } while (0) +#define DBG(n, dev, args...) \ + while (0) { \ + netdev_dbg(dev, args); \ + } #define PRINTK(dev, args...) netdev_dbg(dev, args) #endif @@ -462,9 +465,9 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | skb->len; #else - buf = (char*)((u32)skb->data & ~0x3); - len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3; - cmdA = (((u32)skb->data & 0x3) << 16) | + buf = (char *)((uintptr_t)skb->data & ~0x3); + len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3; + cmdA = (((uintptr_t)skb->data & 0x3) << 16) | TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ | skb->len; #endif @@ -879,7 +882,7 @@ static void smc911x_phy_configure(struct work_struct *work) int phyaddr = lp->mii.phy_id; int my_phy_caps; /* My PHY capabilities */ int my_ad_caps; /* My Advertised capabilities */ - int status; + int status __always_unused; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__); @@ -973,7 +976,7 @@ static void smc911x_phy_interrupt(struct net_device *dev) { struct smc911x_local *lp = netdev_priv(dev); int phyaddr = lp->mii.phy_id; - int status; + int status __always_unused; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); @@ -2044,8 +2047,6 @@ static int smc911x_drv_probe(struct platform_device *pdev) void __iomem *addr; int ret; - /* ndev is not valid yet, so avoid passing it in. 
*/ - DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index f6b73afd1879..742a1f7a838c 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -703,7 +703,8 @@ static void smc_tx(struct net_device *dev) { struct smc_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->base; - unsigned int saved_packet, packet_no, tx_status, pkt_len; + unsigned int saved_packet, packet_no, tx_status; + unsigned int pkt_len __always_unused; DBG(3, dev, "%s\n", __func__); @@ -2191,6 +2192,12 @@ MODULE_DEVICE_TABLE(of, smc91x_match); /** * of_try_set_control_gpio - configure a gpio if it exists + * @dev: net device + * @desc: where to store the GPIO descriptor, if it exists + * @name: name of the GPIO in DT + * @index: index of the GPIO in DT + * @value: set the GPIO to this value + * @nsdelay: delay before setting the GPIO */ static int try_toggle_control_gpio(struct device *dev, struct gpio_desc **desc, diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 1503cc9ec6e2..19d20a6d0d44 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -631,6 +631,7 @@ static void netsec_set_rx_de(struct netsec_priv *priv, static bool netsec_clean_tx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + struct xdp_frame_bulk bq; struct netsec_de *entry; int tail = dring->tail; unsigned int bytes; @@ -639,8 +640,11 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv) spin_lock(&dring->lock); bytes = 0; + xdp_frame_bulk_init(&bq); entry = dring->vaddr + DESC_SZ * tail; + rcu_read_lock(); /* need for xdp_return_frame_bulk */ + while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && cnt < DESC_NUM) { struct netsec_desc *desc; @@ -665,7 +669,10 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv) dev_kfree_skb(desc->skb); } else { bytes += desc->xdpf->len; - xdp_return_frame(desc->xdpf); + if (desc->buf_type == TYPE_NETSEC_XDP_TX) + xdp_return_frame_rx_napi(desc->xdpf); + else + xdp_return_frame_bulk(desc->xdpf, &bq); } next: /* clean up so netsec_uninit_pkt_dring() won't free the skb @@ -684,6 +691,9 @@ next: entry = dring->vaddr + DESC_SZ * tail; cnt++; } + xdp_flush_frame_bulk(&bq); + + rcu_read_unlock(); spin_unlock(&dring->lock); @@ -1304,7 +1314,7 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv) goto err_out; } - err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0); + err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0, priv->napi.napi_id); if (err) goto err_out; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index df7de50497a0..6f271c46368d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -402,6 +402,7 @@ struct dma_features { /* Default LPI timers */ #define STMMAC_DEFAULT_LIT_LS 0x3E8 #define STMMAC_DEFAULT_TWT_LS 0x1E +#define STMMAC_ET_MAX 0xFFFFF #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 2342d497348e..27254b27d7ed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -119,23 +119,23 @@ static 
int dwc_eth_dwmac_config_dt(struct platform_device *pdev, return 0; } -static void *dwc_qos_probe(struct platform_device *pdev, - struct plat_stmmacenet_data *plat_dat, - struct stmmac_resources *stmmac_res) +static int dwc_qos_probe(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *stmmac_res) { int err; plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); if (IS_ERR(plat_dat->stmmac_clk)) { dev_err(&pdev->dev, "apb_pclk clock not found.\n"); - return ERR_CAST(plat_dat->stmmac_clk); + return PTR_ERR(plat_dat->stmmac_clk); } err = clk_prepare_enable(plat_dat->stmmac_clk); if (err < 0) { dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n", err); - return ERR_PTR(err); + return err; } plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); @@ -152,11 +152,11 @@ static void *dwc_qos_probe(struct platform_device *pdev, goto disable; } - return NULL; + return 0; disable: clk_disable_unprepare(plat_dat->stmmac_clk); - return ERR_PTR(err); + return err; } static int dwc_qos_remove(struct platform_device *pdev) @@ -267,19 +267,17 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv) return 0; } -static void *tegra_eqos_probe(struct platform_device *pdev, - struct plat_stmmacenet_data *data, - struct stmmac_resources *res) +static int tegra_eqos_probe(struct platform_device *pdev, + struct plat_stmmacenet_data *data, + struct stmmac_resources *res) { struct device *dev = &pdev->dev; struct tegra_eqos *eqos; int err; eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL); - if (!eqos) { - err = -ENOMEM; - goto error; - } + if (!eqos) + return -ENOMEM; eqos->dev = &pdev->dev; eqos->regs = res->addr; @@ -368,9 +366,7 @@ bypass_clk_reset_gpio: if (err < 0) goto reset; -out: - return eqos; - + return 0; reset: reset_control_assert(eqos->rst); reset_phy: @@ -384,8 +380,7 @@ disable_slave: disable_master: clk_disable_unprepare(eqos->clk_master); error: - eqos = ERR_PTR(err); - goto out; + return err; } static int tegra_eqos_remove(struct platform_device *pdev) @@ -403,9 +398,9 @@ static int tegra_eqos_remove(struct platform_device *pdev) } struct dwc_eth_dwmac_data { - void *(*probe)(struct platform_device *pdev, - struct plat_stmmacenet_data *data, - struct stmmac_resources *res); + int (*probe)(struct platform_device *pdev, + struct plat_stmmacenet_data *data, + struct stmmac_resources *res); int (*remove)(struct platform_device *pdev); }; @@ -424,7 +419,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) const struct dwc_eth_dwmac_data *data; struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; - void *priv; int ret; data = device_get_match_data(&pdev->dev); @@ -448,10 +442,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - priv = data->probe(pdev, plat_dat, &stmmac_res); - if (IS_ERR(priv)) { - ret = PTR_ERR(priv); - + ret = data->probe(pdev, plat_dat, &stmmac_res); + if (ret < 0) { if (ret != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 81ee0a071b4e..a2e80c89de2d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, int ret; int i; + plat->phy_addr = -1; plat->clk_csr = 5; plat->has_gmac = 0; plat->has_gmac4 = 1; @@ -345,7 
+346,6 @@ static int ehl_sgmii_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 1; - plat->phy_addr = 0; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; plat->serdes_powerup = intel_serdes_powerup; @@ -362,7 +362,6 @@ static int ehl_rgmii_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 1; - plat->phy_addr = 0; plat->phy_interface = PHY_INTERFACE_MODE_RGMII; return ehl_common_data(pdev, plat); @@ -376,7 +375,6 @@ static int ehl_pse0_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 2; - plat->phy_addr = 1; return ehl_common_data(pdev, plat); } @@ -408,7 +406,6 @@ static int ehl_pse1_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 3; - plat->phy_addr = 1; return ehl_common_data(pdev, plat); } @@ -450,7 +447,6 @@ static int tgl_sgmii_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 1; - plat->phy_addr = 0; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 6d6bd77bb6af..459ae715b33d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -299,7 +299,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) dev_err(dwmac->dev, "unsupported phy-mode %s\n", phy_modes(dwmac->phy_mode)); return -EINVAL; - }; + } if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) { if (!dwmac->timing_adj_clk) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 592b043f9676..82df91c130f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -176,9 +176,11 @@ enum power_event { */ #define GMAC4_LPI_CTRL_STATUS 0xd0 #define GMAC4_LPI_TIMER_CTRL 0xd4 +#define GMAC4_LPI_ENTRY_TIMER 0xd8 /* LPI control and status defines */ #define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ +#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */ #define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ #define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ #define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index ced6d76a0d85..29f765a246a0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -379,6 +379,27 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); } +static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et) +{ + void __iomem *ioaddr = hw->pcsr; + int value = et & STMMAC_ET_MAX; + int regval; + + /* Program LPI entry timer value into register */ + writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER); + + /* Enable/disable LPI entry timer */ + regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + + if (et) + regval |= GMAC4_LPI_CTRL_STATUS_LPIATE; + else + regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE; + + writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS); +} + static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) { void __iomem *ioaddr = hw->pcsr; @@ -1164,6 
+1185,7 @@ const struct stmmac_ops dwmac4_ops = { .get_umac_addr = dwmac4_get_umac_addr, .set_eee_mode = dwmac4_set_eee_mode, .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, @@ -1205,6 +1227,7 @@ const struct stmmac_ops dwmac410_ops = { .get_umac_addr = dwmac4_get_umac_addr, .set_eee_mode = dwmac4_set_eee_mode, .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, @@ -1249,6 +1272,7 @@ const struct stmmac_ops dwmac510_ops = { .get_umac_addr = dwmac4_get_umac_addr, .set_eee_mode = dwmac4_set_eee_mode, .reset_eee_mode = dwmac4_reset_eee_mode, + .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 67ba67ed0cb9..03e79a677c8b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -305,17 +305,13 @@ int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, static int dwmac5_rxp_disable(void __iomem *ioaddr) { u32 val; - int ret; val = readl(ioaddr + MTL_OPERATION_MODE); val &= ~MTL_FRPE; writel(val, ioaddr + MTL_OPERATION_MODE); - ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, val & RXPI, 1, 10000); - if (ret) - return ret; - return 0; } static void dwmac5_rxp_enable(void __iomem *ioaddr) diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index e2dca9b6e992..b40b2e0667bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -337,6 +337,7 @@ struct stmmac_ops { void (*set_eee_mode)(struct mac_device_info *hw, bool en_tx_lpi_clockgating); void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, @@ -439,6 +440,8 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) #define stmmac_reset_eee_mode(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) +#define stmmac_set_eee_lpi_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args) #define stmmac_set_eee_timer(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) #define stmmac_set_eee_pls(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 727e68dfaf1c..e553b9a1f785 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -13,6 +13,7 @@ #define DRV_MODULE_VERSION "Jan_2016" #include <linux/clk.h> +#include <linux/hrtimer.h> #include <linux/if_vlan.h> #include <linux/stmmac.h> #include <linux/phylink.h> @@ -46,7 +47,7 @@ struct stmmac_tx_info { struct stmmac_tx_queue { u32 tx_count_frames; int tbs; - struct timer_list txtimer; + struct hrtimer txtimer; u32 queue_index; struct stmmac_priv *priv_data; struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; @@ -207,6 +208,7 @@ struct stmmac_priv { int tx_lpi_timer; int tx_lpi_enabled; int eee_tw_timer; + bool eee_sw_timer_en; unsigned int mode; unsigned int chain_mode; int extend_desc; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c33db79cdd0a..5b1c12ff98c0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -111,7 +111,7 @@ static void stmmac_init_fs(struct net_device *dev); static void stmmac_exit_fs(struct net_device *dev); #endif -#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) +#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC)) /** * stmmac_verify_args - verify the driver parameters. @@ -294,6 +294,16 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) return dirty; } +static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en) +{ + int tx_lpi_timer; + + /* Clear/set the SW EEE timer flag based on LPI ET enablement */ + priv->eee_sw_timer_en = en ? 0 : 1; + tx_lpi_timer = en ? 
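/* a non-zero value arms the hardware LPI entry timer (LPIATE in
 * dwmac4_set_eee_lpi_entry_timer()); zero clears it again so EEE
 * falls back to the software eee_ctrl_timer path
 */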
priv->tx_lpi_timer : 0; + stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); +} + /** * stmmac_enable_eee_mode - check and enter in LPI mode * @priv: driver private structure @@ -327,6 +337,11 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) */ void stmmac_disable_eee_mode(struct stmmac_priv *priv) { + if (!priv->eee_sw_timer_en) { + stmmac_lpi_entry_timer_config(priv, 0); + return; + } + stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; @@ -376,6 +391,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv) if (!priv->eee_active) { if (priv->eee_enabled) { netdev_dbg(priv->dev, "disable EEE\n"); + stmmac_lpi_entry_timer_config(priv, 0); del_timer_sync(&priv->eee_ctrl_timer); stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); } @@ -389,7 +405,15 @@ bool stmmac_eee_init(struct stmmac_priv *priv) eee_tw_timer); } - mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); + if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { + del_timer_sync(&priv->eee_ctrl_timer); + priv->tx_path_in_lpi_mode = false; + stmmac_lpi_entry_timer_config(priv, 1); + } else { + stmmac_lpi_entry_timer_config(priv, 0); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(priv->tx_lpi_timer)); + } mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); @@ -2057,14 +2081,16 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); } - if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { + if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && + priv->eee_sw_timer_en) { stmmac_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); } /* We still have pending packets, let's call for a new scheduling */ if (tx_q->dirty_tx != tx_q->cur_tx) - mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); + hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + HRTIMER_MODE_REL); __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2348,7 +2374,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; - mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); + hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer), + HRTIMER_MODE_REL); } /** @@ -2357,9 +2384,9 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) * Description: * This is the timer handler to directly invoke the stmmac_tx_clean. 
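 * Runs as an hrtimer callback now, so microsecond-scale coalesce
 * timeouts are honored instead of being rounded up to whole jiffies.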
*/ -static void stmmac_tx_timer(struct timer_list *t) +static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) { - struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); + struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); struct stmmac_priv *priv = tx_q->priv_data; struct stmmac_channel *ch; @@ -2373,6 +2400,8 @@ static void stmmac_tx_timer(struct timer_list *t) spin_unlock_irqrestore(&ch->lock, flags); __napi_schedule(&ch->tx_napi); } + + return HRTIMER_NORESTART; } /** @@ -2395,7 +2424,8 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv) for (chan = 0; chan < tx_channel_count; chan++) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; - timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); + hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx_q->txtimer.function = stmmac_tx_timer; } } @@ -2887,7 +2917,7 @@ irq_error: phylink_stop(priv->phylink); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - del_timer_sync(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->tx_queue[chan].txtimer); stmmac_hw_teardown(dev); init_error: @@ -2917,7 +2947,7 @@ static int stmmac_release(struct net_device *dev) stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - del_timer_sync(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->tx_queue[chan].txtimer); /* Free the IRQ lines */ free_irq(dev->irq, dev); @@ -3321,7 +3351,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q = &priv->tx_queue[queue]; first_tx = tx_q->cur_tx; - if (priv->tx_path_in_lpi_mode) + if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) stmmac_disable_eee_mode(priv); /* Manage oversized TCP frames for GMAC4 device */ @@ -5163,7 +5193,7 @@ int stmmac_suspend(struct device *dev) stmmac_disable_all_queues(priv); for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) - del_timer_sync(&priv->tx_queue[chan].txtimer); + hrtimer_cancel(&priv->tx_queue[chan].txtimer); if (priv->eee_enabled) { priv->tx_path_in_lpi_mode = false; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index b2a707e2ef43..d64116e0543e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -365,6 +365,9 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->name = "stmmac"; + if (priv->plat->has_gmac4) + new_bus->probe_capabilities = MDIOBUS_C22_C45; + if (priv->plat->has_xgmac) { new_bus->read = &stmmac_xgmac2_mdio_read; new_bus->write = &stmmac_xgmac2_mdio_write; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index af34a4cadbb0..6dc9f10414e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -399,6 +399,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + void *ret; int rc; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); @@ -576,12 +577,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) clk_prepare_enable(plat->stmmac_clk); } - plat->pclk = devm_clk_get(&pdev->dev, "pclk"); + plat->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); if (IS_ERR(plat->pclk)) { - if (PTR_ERR(plat->pclk) == -EPROBE_DEFER) - goto error_pclk_get; - - plat->pclk = 
NULL; + ret = plat->pclk; + goto error_pclk_get; } clk_prepare_enable(plat->pclk); @@ -596,14 +595,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate); } - plat->stmmac_rst = devm_reset_control_get(&pdev->dev, - STMMAC_RESOURCE_NAME); + plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev, + STMMAC_RESOURCE_NAME); if (IS_ERR(plat->stmmac_rst)) { - if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER) - goto error_hw_init; - - dev_info(&pdev->dev, "no reset control found\n"); - plat->stmmac_rst = NULL; + ret = plat->stmmac_rst; + goto error_hw_init; } return plat; @@ -613,7 +609,7 @@ error_hw_init: error_pclk_get: clk_disable_unprepare(plat->stmmac_clk); - return ERR_PTR(-EPROBE_DEFER); + return ret; } /** diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index cc27d660a818..f5bed4d26e80 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -209,17 +209,11 @@ err_unfill: static int tc_delete_knode(struct stmmac_priv *priv, struct tc_cls_u32_offload *cls) { - int ret; - /* Set entry and fragments as not used */ tc_unfill_entry(priv, cls); - ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, - priv->tc_entries_max); - if (ret) - return ret; - - return 0; + return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); } static int tc_setup_cls_u32(struct stmmac_priv *priv, diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 501d676fd88b..766e8866bbef 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -241,8 +241,8 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, if (!vid) unreg_mcast = port_mask; dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid); - ret = cpsw_ale_add_vlan(common->ale, vid, port_mask, - unreg_mcast, port_mask, 0); + ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask, + unreg_mcast, port_mask, 0); pm_runtime_put(common->dev); return ret; @@ -252,6 +252,7 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); + struct am65_cpsw_port *port = am65_ndev_to_port(ndev); int ret; if (!netif_running(ndev) || !vid) @@ -264,14 +265,15 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, } dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid); - ret = cpsw_ale_del_vlan(common->ale, vid, 0); + ret = cpsw_ale_del_vlan(common->ale, vid, + BIT(port->port_id) | ALE_PORT_HOST); pm_runtime_put(common->dev); return ret; } -static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port, - bool promisc) +static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port, + bool promisc) { struct am65_cpsw_common *common = port->common; @@ -296,7 +298,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev) bool promisc; promisc = !!(ndev->flags & IFF_PROMISC); - am65_cpsw_slave_set_promisc_2g(port, promisc); + am65_cpsw_slave_set_promisc(port, promisc); if (promisc) return; @@ -373,7 +375,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common, cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, AM65_CPSW_NAV_PS_DATA_SIZE); - cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb)); + cppi5_hdesc_attach_buf(desc_rx, 
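/* attach the real DMA address and tailroom for both the buffer and
 * the original-buffer descriptor fields instead of 0/0 placeholders
 */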
buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb)); swdata = cppi5_hdesc_get_swdata(desc_rx); *((void **)swdata) = skb; @@ -426,9 +428,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common, writel(common->rx_flow_id_base, host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET); /* en tx crc offload */ - if (features & NETIF_F_HW_CSUM) - writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, - host_p->port_base + AM65_CPSW_P0_REG_CTL); + writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL); am65_cpsw_nuss_set_p0_ptype(common); @@ -629,13 +629,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) am65_cpsw_port_set_sl_mac(port, ndev->dev_addr); - if (port->slave.mac_only) + if (port->slave.mac_only) { /* enable mac-only mode on port */ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_MACONLY, 1); - if (AM65_CPSW_IS_CPSW2G(common)) cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1); + } port_mask = BIT(port->port_id) | ALE_PORT_HOST; cpsw_ale_add_ucast(common->ale, ndev->dev_addr, @@ -767,7 +767,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common, return ret; } - if (desc_dma & 0x1) { + if (cppi5_desc_is_tdcm(desc_dma)) { dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx); return 0; } @@ -911,10 +911,57 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma) dev_kfree_skb_any(skb); } +static struct sk_buff * +am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn, + dma_addr_t desc_dma) +{ + struct am65_cpsw_ndev_priv *ndev_priv; + struct am65_cpsw_ndev_stats *stats; + struct cppi5_host_desc_t *desc_tx; + struct net_device *ndev; + struct sk_buff *skb; + void **swdata; + + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_tx); + skb = *(swdata); + am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx); + + ndev = skb->dev; + + am65_cpts_tx_timestamp(tx_chn->common->cpts, skb); + + ndev_priv = netdev_priv(ndev); + stats = this_cpu_ptr(ndev_priv->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + return skb; +} + +static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev, + struct netdev_queue *netif_txq) +{ + if (netif_tx_queue_stopped(netif_txq)) { + /* Check whether the queue is stopped due to stalled + * tx dma, if the queue is stopped then wake the queue + * as we have free desc for tx + */ + __netif_tx_lock(netif_txq, smp_processor_id()); + if (netif_running(ndev) && + (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS)) + netif_tx_wake_queue(netif_txq); + + __netif_tx_unlock(netif_txq); + } +} + static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, int chn, unsigned int budget) { - struct cppi5_host_desc_t *desc_tx; struct device *dev = common->dev; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; @@ -923,41 +970,68 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; - void **swdata; tx_chn = &common->tx_chns[chn]; while (true) { - struct am65_cpsw_ndev_priv *ndev_priv; - struct am65_cpsw_ndev_stats *stats; - + spin_lock(&tx_chn->lock); res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); + spin_unlock(&tx_chn->lock); if (res == -ENODATA) break; - if (desc_dma & 0x1) { + if (cppi5_desc_is_tdcm(desc_dma)) { if 
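/* teardown marker seen: the last channel to drain signals tdown_complete */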
(atomic_dec_and_test(&common->tdown_cnt)) complete(&common->tdown_complete); break; } - desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, - desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_tx); - skb = *(swdata); - am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx); - + skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma); + total_bytes = skb->len; ndev = skb->dev; + napi_consume_skb(skb, budget); + num_tx++; - am65_cpts_tx_timestamp(common->cpts, skb); + netif_txq = netdev_get_tx_queue(ndev, chn); - ndev_priv = netdev_priv(ndev); - stats = this_cpu_ptr(ndev_priv->stats); - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); + netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); + am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); + } + + dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx); + + return num_tx; +} + +static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common, + int chn, unsigned int budget) +{ + struct device *dev = common->dev; + struct am65_cpsw_tx_chn *tx_chn; + struct netdev_queue *netif_txq; + unsigned int total_bytes = 0; + struct net_device *ndev; + struct sk_buff *skb; + dma_addr_t desc_dma; + int res, num_tx = 0; + + tx_chn = &common->tx_chns[chn]; + + while (true) { + res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); + if (res == -ENODATA) + break; + + if (cppi5_desc_is_tdcm(desc_dma)) { + if (atomic_dec_and_test(&common->tdown_cnt)) + complete(&common->tdown_complete); + break; + } + + skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma); + + ndev = skb->dev; total_bytes += skb->len; napi_consume_skb(skb, budget); num_tx++; @@ -970,19 +1044,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); - if (netif_tx_queue_stopped(netif_txq)) { - /* Check whether the queue is stopped due to stalled tx dma, - * if the queue is stopped then wake the queue as - * we have free desc for tx - */ - __netif_tx_lock(netif_txq, smp_processor_id()); - if (netif_running(ndev) && - (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= - MAX_SKB_FRAGS)) - netif_tx_wake_queue(netif_txq); + am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); - __netif_tx_unlock(netif_txq); - } dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx); return num_tx; @@ -993,8 +1056,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx); int num_tx; - num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, - budget); + if (AM65_CPSW_IS_CPSW2G(tx_chn->common)) + num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget); + else + num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget); + num_tx = min(num_tx, budget); if (num_tx < budget) { napi_complete(napi_tx); @@ -1139,7 +1205,13 @@ done_tx: cppi5_hdesc_set_pktlen(first_desc, pkt_len); desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); - ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + if (AM65_CPSW_IS_CPSW2G(common)) { + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + } else { + spin_lock_bh(&tx_chn->lock); + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + spin_unlock_bh(&tx_chn->lock); + } if (ret) { dev_err(dev, "can't push desc %d\n", ret); /* inform bql */ @@ -1369,32 +1441,7 @@ static void am65_cpsw_nuss_ndo_get_stats(struct 
net_device *dev, stats->tx_dropped = dev->stats.tx_dropped; } -static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev, - netdev_features_t features) -{ - struct am65_cpsw_common *common = am65_ndev_to_common(ndev); - netdev_features_t changes = features ^ ndev->features; - struct am65_cpsw_host *host_p; - - host_p = am65_common_get_host(common); - - if (changes & NETIF_F_HW_CSUM) { - bool enable = !!(features & NETIF_F_HW_CSUM); - - dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n", - enable ? "ON" : "OFF"); - if (enable) - writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, - host_p->port_base + AM65_CPSW_P0_REG_CTL); - else - writel(0, - host_p->port_base + AM65_CPSW_P0_REG_CTL); - } - - return 0; -} - -static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = { +static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_open = am65_cpsw_nuss_ndo_slave_open, .ndo_stop = am65_cpsw_nuss_ndo_slave_stop, .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit, @@ -1406,7 +1453,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = { .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, - .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features, .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, }; @@ -1417,7 +1463,6 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port) if (!port->disabled) return; - common->disabled_ports_mask |= BIT(port->port_id); cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); @@ -1496,6 +1541,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) snprintf(tx_chn->tx_chn_name, sizeof(tx_chn->tx_chn_name), "tx%d", i); + spin_lock_init(&tx_chn->lock); tx_chn->common = common; tx_chn->id = i; tx_chn->descs_num = max_desc_num; @@ -1515,9 +1561,8 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common) tx_chn->tx_chn_name, &tx_cfg); if (IS_ERR(tx_chn->tx_chn)) { - ret = PTR_ERR(tx_chn->tx_chn); - dev_err(dev, "Failed to request tx dma channel %d\n", - ret); + ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn), + "Failed to request tx dma channel\n"); goto err; } @@ -1588,8 +1633,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg); if (IS_ERR(rx_chn->rx_chn)) { - ret = PTR_ERR(rx_chn->rx_chn); - dev_err(dev, "Failed to request rx dma channel %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn), + "Failed to request rx dma channel\n"); goto err; } @@ -1606,7 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) }; struct k3_ring_cfg fdqring_cfg = { .elm_size = K3_RINGACC_RING_ELSIZE_8, - .mode = K3_RINGACC_RING_MODE_MESSAGE, .flags = K3_RINGACC_RING_SHARED, }; struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = { @@ -1620,6 +1664,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_flow_cfg.ring_rxfdq0_id = fdqring_id; rx_flow_cfg.rx_cfg.size = max_desc_num; rx_flow_cfg.rxfdq_cfg.size = max_desc_num; + rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode; ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn, i, &rx_flow_cfg); @@ -1725,6 +1770,13 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common) return ret; } common->cpts = cpts; + /* Forbid PM runtime if CPTS is running. 
+ * K3 CPSWxG modules may completely lose context during ON->OFF + * transitions depending on integration. + * AM65x/J721E MCU CPSW2G: false + * J721E MAIN_CPSW9G: true + */ + pm_runtime_forbid(dev); return 0; } @@ -1778,8 +1830,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) return PTR_ERR(port->slave.mac_sl); port->disabled = !of_device_is_available(port_np); - if (port->disabled) + if (port->disabled) { + common->disabled_ports_mask |= BIT(port->port_id); continue; + } port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL); if (IS_ERR(port->slave.ifphy)) { @@ -1795,12 +1849,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) /* get phy/link info */ if (of_phy_is_fixed_link(port_np)) { ret = of_phy_register_fixed_link(port_np); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "%pOF failed to register fixed-link phy: %d\n", - port_np, ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "failed to register fixed-link phy %pOF\n", + port_np); port->slave.phy_node = of_node_get(port_np); } else { port->slave.phy_node = @@ -1833,6 +1885,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) } of_node_put(node); + /* is there at least one ext. port? */ + if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) { + dev_err(dev, "No Ext. ports are available\n"); + return -ENODEV; + } + return 0; } @@ -1843,14 +1901,18 @@ static void am65_cpsw_pcpu_stats_free(void *data) free_percpu(stats); } -static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) +static int +am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) { struct am65_cpsw_ndev_priv *ndev_priv; struct device *dev = common->dev; struct am65_cpsw_port *port; int ret; - port = am65_common_get_port(common, 1); + port = &common->ports[port_idx]; + + if (port->disabled) + return 0; /* alloc netdev */ port->ndev = devm_alloc_etherdev_mqs(common->dev, @@ -1879,7 +1941,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) port->ndev->features = port->ndev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; port->ndev->vlan_features |= NETIF_F_SG; - port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g; + port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops; port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; /* Disable TX checksum offload by default due to HW bug */ @@ -1892,29 +1954,41 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free, ndev_priv->stats); - if (ret) { - dev_err(dev, "Failed to add percpu stat free action %d\n", ret); - return ret; + if (ret) + dev_err(dev, "failed to add percpu stat free action %d\n", ret); + + if (!common->dma_ndev) + common->dma_ndev = port->ndev; + + return ret; +} + +static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common) +{ + int ret; + int i; + + for (i = 0; i < common->port_num; i++) { + ret = am65_cpsw_nuss_init_port_ndev(common, i); + if (ret) + return ret; } - netif_napi_add(port->ndev, &common->napi_rx, + netif_napi_add(common->dma_ndev, &common->napi_rx, am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT); return ret; } -static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common) +static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) { struct device *dev = common->dev; - struct am65_cpsw_port *port; int i, ret = 0; - port = am65_common_get_port(common, 1); - for (i = 0; i < common->tx_ch_num;
i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - netif_tx_napi_add(port->ndev, &tx_chn->napi_tx, + netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx, am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT); ret = devm_request_irq(dev, tx_chn->irq, @@ -1932,16 +2006,27 @@ err: return ret; } -static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common) +static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) +{ + struct am65_cpsw_port *port; + int i; + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + if (port->ndev) + unregister_netdev(port->ndev); + } +} + +static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common) { struct device *dev = common->dev; struct am65_cpsw_port *port; - int ret = 0; + int ret = 0, i; - port = am65_common_get_port(common, 1); - ret = am65_cpsw_nuss_ndev_add_napi_2g(common); + ret = am65_cpsw_nuss_ndev_add_tx_napi(common); if (ret) - goto err; + return ret; ret = devm_request_irq(dev, common->rx_chns.irq, am65_cpsw_nuss_rx_irq, @@ -1949,17 +2034,31 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common) if (ret) { dev_err(dev, "failure requesting rx irq %u, %d\n", common->rx_chns.irq, ret); - goto err; + return ret; + } + + for (i = 0; i < common->port_num; i++) { + port = &common->ports[i]; + + if (!port->ndev) + continue; + + ret = register_netdev(port->ndev); + if (ret) { + dev_err(dev, "error registering slave net device %i %d\n", + i, ret); + goto err_cleanup_ndev; + } } - ret = register_netdev(port->ndev); - if (ret) - dev_err(dev, "error registering slave net device %d\n", ret); /* can't auto unregister ndev using devm_add_action() due to * devres release sequence in DD core for DMA */ -err: + return 0; + +err_cleanup_ndev: + am65_cpsw_nuss_cleanup_ndev(common); return ret; } @@ -1972,19 +2071,7 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx) if (ret) return ret; - return am65_cpsw_nuss_ndev_add_napi_2g(common); -} - -static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) -{ - struct am65_cpsw_port *port; - int i; - - for (i = 0; i < common->port_num; i++) { - port = &common->ports[i]; - if (port->ndev) - unregister_netdev(port->ndev); - } + return am65_cpsw_nuss_ndev_add_tx_napi(common); } struct am65_cpsw_soc_pdata { @@ -2005,10 +2092,14 @@ static const struct soc_device_attribute am65_cpsw_socinfo[] = { static const struct am65_cpsw_pdata am65x_sr1_0 = { .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, + .ale_dev_id = "am65x-cpsw2g", + .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, }; static const struct am65_cpsw_pdata j721e_pdata = { .quirks = 0, + .ale_dev_id = "am65x-cpsw2g", + .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, }; static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { @@ -2068,9 +2159,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) return -ENOENT; of_node_put(node); - if (common->port_num != 1) - return -EOPNOTSUPP; - common->rx_flow_id_base = -1; init_completion(&common->tdown_complete); common->tx_ch_num = 1; @@ -2089,13 +2177,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) return -ENOMEM; clk = devm_clk_get(dev, "fck"); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - - if (ret != -EPROBE_DEFER) - dev_err(dev, "error getting fck clock %d\n", ret); - return ret; - } + if (IS_ERR(clk)) + return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n"); common->bus_freq = clk_get_rate(clk); pm_runtime_enable(dev); @@ -2145,7 +2228,7 @@ static int
am65_cpsw_nuss_probe(struct platform_device *pdev) ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; ale_params.ale_ports = common->port_num + 1; ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; - ale_params.dev_id = "am65x-cpsw2g"; + ale_params.dev_id = common->pdata.ale_dev_id; ale_params.bus_freq = common->bus_freq; common->ale = cpsw_ale_create(&ale_params); @@ -2165,11 +2248,11 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) dev_set_drvdata(dev, common); - ret = am65_cpsw_nuss_init_ndev_2g(common); + ret = am65_cpsw_nuss_init_ndevs(common); if (ret) goto err_of_clear; - ret = am65_cpsw_nuss_ndev_reg_2g(common); + ret = am65_cpsw_nuss_register_ndevs(common); if (ret) goto err_of_clear; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 993e1d4d3222..02aed4c0ceba 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -11,6 +11,7 @@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> #include "am65-cpsw-qos.h" struct am65_cpts; @@ -59,6 +60,7 @@ struct am65_cpsw_tx_chn { struct am65_cpsw_common *common; struct k3_cppi_desc_pool *desc_pool; struct k3_udma_glue_tx_channel *tx_chn; + spinlock_t lock; /* protect TX rings in multi-port mode */ int irq; u32 id; u32 descs_num; @@ -77,6 +79,8 @@ struct am65_cpsw_rx_chn { struct am65_cpsw_pdata { u32 quirks; + enum k3_ring_mode fdqring_mode; + const char *ale_dev_id; }; struct am65_cpsw_common { @@ -91,6 +95,7 @@ struct am65_cpsw_common { struct am65_cpsw_host host; struct am65_cpsw_port *ports; u32 disabled_ports_mask; + struct net_device *dma_ndev; int usage_count; /* number of opened ports */ struct cpsw_ale *ale; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index a6a455c32628..cdc308a2aa3e 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -634,8 +634,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag, return 0; } -static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, - u16 vid, int port_mask) +static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry, + u16 vid, int port_mask) { int reg_mcast, unreg_mcast; int members, untag; @@ -644,6 +644,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, ALE_ENT_VID_MEMBER_LIST); members &= ~port_mask; if (!members) { + cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); return; } @@ -673,7 +674,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry, ALE_ENT_VID_MEMBER_LIST, members); } -int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask) { u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; int idx; @@ -684,11 +685,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) cpsw_ale_read(ale, idx, ale_entry); - if (port_mask) { - cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask); - } else { + cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask); + cpsw_ale_write(ale, idx, ale_entry); + + return 0; +} + +int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask) +{ + u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0}; + int members, idx; + + idx = cpsw_ale_match_vlan(ale, vid); + if (idx < 0) + return -ENOENT; + + cpsw_ale_read(ale, 
idx, ale_entry); + + /* if !port_mask - force remove VLAN (legacy). + * Check if there are other VLAN member ports: + * if not - remove the VLAN. + * if so, the same VLAN was added to >1 port in multi-port mode, so + * remove only the port_mask ports from the VLAN ALE entry, excluding the Host port. + */ + members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST); + members &= ~port_mask; + + if (!port_mask || !members) { + /* last port or force remove - remove VLAN */ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + } else { + port_mask &= ~ALE_PORT_HOST; + cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask); } cpsw_ale_write(ale, idx, ale_entry); diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index 5e4a69662c5f..13fe47687fde 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -134,6 +134,7 @@ static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid) int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask, int untag_mask, int reg_mcast, int unreg_mcast); +int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask); void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask, bool add); diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 424e644724e4..99f44563e10f 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1186,7 +1186,7 @@ static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch) pool = cpsw->page_pool[ch]; rxq = &priv->xdp_rxq[ch]; - ret = xdp_rxq_info_reg(rxq, priv->ndev, ch); + ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0); if (ret) return ret; diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c index 985a929bb957..29747da5c514 100644 --- a/drivers/net/ethernet/ti/cpsw_switchdev.c +++ b/drivers/net/ethernet/ti/cpsw_switchdev.c @@ -227,7 +227,7 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid, else port_mask = BIT(priv->emac_port); - ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask); + ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask); if (ret != 0) return ret; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 702fdc393da0..cfff3d48807a 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -381,9 +381,9 @@ static int davinci_mdio_probe(struct platform_device *pdev) } data->bus->name = dev_name(dev); - data->bus->read = davinci_mdio_read, - data->bus->write = davinci_mdio_write, - data->bus->reset = davinci_mdio_reset, + data->bus->read = davinci_mdio_read; + data->bus->write = davinci_mdio_write; + data->bus->reset = davinci_mdio_reset; data->bus->parent = dev; data->bus->priv = data; diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 267c080ee084..0b2ce4bdc2c3 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -186,6 +186,7 @@ static void tlan_reset_adapter(struct net_device *); static void tlan_finish_reset(struct net_device *); static void tlan_set_mac(struct net_device *, int areg, char *mac); +static void __tlan_phy_print(struct net_device *); static void tlan_phy_print(struct net_device *); static void tlan_phy_detect(struct net_device *); static void tlan_phy_power_down(struct net_device *); @@ -201,9 +202,11 @@ static void tlan_phy_finish_auto_neg(struct
net_device *); static int tlan_phy_dp83840a_check(struct net_device *); */ -static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); +static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); +static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); static void tlan_mii_send_data(u16, u32, unsigned); static void tlan_mii_sync(u16); +static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16); static void tlan_mii_write_reg(struct net_device *, u16, u16, u16); static void tlan_ee_send_start(u16); @@ -242,23 +245,20 @@ static u32 tlan_handle_rx_eoc }; -static inline void +static void tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) { struct tlan_priv *priv = netdev_priv(dev); unsigned long flags = 0; - if (!in_irq()) - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&priv->lock, flags); if (priv->timer.function != NULL && priv->timer_type != TLAN_TIMER_ACTIVITY) { - if (!in_irq()) - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); return; } priv->timer.function = tlan_timer; - if (!in_irq()) - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&priv->lock, flags); priv->timer_set_at = jiffies; priv->timer_type = type; @@ -1703,22 +1703,22 @@ static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) dev->name, (unsigned) net_sts); } if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) { - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); - tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); + __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); + __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); if (!(tlphy_sts & TLAN_TS_POLOK) && !(tlphy_ctl & TLAN_TC_SWAPOL)) { tlphy_ctl |= TLAN_TC_SWAPOL; - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, - tlphy_ctl); + __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); } else if ((tlphy_sts & TLAN_TS_POLOK) && (tlphy_ctl & TLAN_TC_SWAPOL)) { tlphy_ctl &= ~TLAN_TC_SWAPOL; - tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, - tlphy_ctl); + __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); } if (debug) - tlan_phy_print(dev); + __tlan_phy_print(dev); } } @@ -2379,7 +2379,7 @@ ThunderLAN driver PHY layer routines /********************************************************************* - * tlan_phy_print + * __tlan_phy_print * * Returns: * Nothing @@ -2391,11 +2391,13 @@ ThunderLAN driver PHY layer routines * ********************************************************************/ -static void tlan_phy_print(struct net_device *dev) +static void __tlan_phy_print(struct net_device *dev) { struct tlan_priv *priv = netdev_priv(dev); u16 i, data0, data1, data2, data3, phy; + lockdep_assert_held(&priv->lock); + phy = priv->phy[priv->phy_num]; if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { @@ -2404,10 +2406,10 @@ static void tlan_phy_print(struct net_device *dev) netdev_info(dev, "PHY 0x%02x\n", phy); pr_info(" Off. 
+0 +1 +2 +3\n"); for (i = 0; i < 0x20; i += 4) { - tlan_mii_read_reg(dev, phy, i, &data0); - tlan_mii_read_reg(dev, phy, i + 1, &data1); - tlan_mii_read_reg(dev, phy, i + 2, &data2); - tlan_mii_read_reg(dev, phy, i + 3, &data3); + __tlan_mii_read_reg(dev, phy, i, &data0); + __tlan_mii_read_reg(dev, phy, i + 1, &data1); + __tlan_mii_read_reg(dev, phy, i + 2, &data2); + __tlan_mii_read_reg(dev, phy, i + 3, &data3); pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n", i, data0, data1, data2, data3); } @@ -2417,7 +2419,15 @@ static void tlan_phy_print(struct net_device *dev) } +static void tlan_phy_print(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); + __tlan_phy_print(dev); + spin_unlock_irqrestore(&priv->lock, flags); +} /********************************************************************* @@ -2795,7 +2805,7 @@ these routines are based on the information in chap. 2 of the /*************************************************************** - * tlan_mii_read_reg + * __tlan_mii_read_reg * * Returns: * false if ack received ok @@ -2819,7 +2829,7 @@ these routines are based on the information in chap. 2 of the **************************************************************/ static bool -tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) +__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) { u8 nack; u16 sio, tmp; @@ -2827,15 +2837,13 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) bool err; int minten; struct tlan_priv *priv = netdev_priv(dev); - unsigned long flags = 0; + + lockdep_assert_held(&priv->lock); err = false; outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; - if (!in_irq()) - spin_lock_irqsave(&priv->lock, flags); - tlan_mii_sync(dev->base_addr); minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); @@ -2881,15 +2889,19 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) *val = tmp; - if (!in_irq()) - spin_unlock_irqrestore(&priv->lock, flags); - return err; - } +static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, + u16 *val) +{ + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags; - + spin_lock_irqsave(&priv->lock, flags); + __tlan_mii_read_reg(dev, phy, reg, val); + spin_unlock_irqrestore(&priv->lock, flags); +} /*************************************************************** * tlan_mii_send_data @@ -2971,7 +2983,7 @@ static void tlan_mii_sync(u16 base_port) /*************************************************************** - * tlan_mii_write_reg + * __tlan_mii_write_reg * * Returns: * Nothing @@ -2991,19 +3003,17 @@ static void tlan_mii_sync(u16 base_port) **************************************************************/ static void -tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) +__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) { u16 sio; int minten; - unsigned long flags = 0; struct tlan_priv *priv = netdev_priv(dev); + lockdep_assert_held(&priv->lock); + outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; - if (!in_irq()) - spin_lock_irqsave(&priv->lock, flags); - tlan_mii_sync(dev->base_addr); minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); @@ -3024,12 +3034,18 @@ tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) if (minten) tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); - if (!in_irq()) - 
spin_unlock_irqrestore(&priv->lock, flags); - } +static void +tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) +{ + struct tlan_priv *priv = netdev_priv(dev); + unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); + __tlan_mii_write_reg(dev, phy, reg, val); + spin_unlock_irqrestore(&priv->lock, flags); +} /***************************************************************************** diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index d0d0d4fe9d40..3b2137d1f4c6 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_XILINX config XILINX_EMACLITE tristate "Xilinx 10/100 Ethernet Lite support" - depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS + depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || COMPILE_TEST select PHYLIB help This driver supports the 10/100 Ethernet Lite from Xilinx. diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index f34c7903ff52..a03c3ca1b28d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -378,6 +378,7 @@ struct axidma_bd { * @dev: Pointer to device structure * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure + * @mii_clk_div: MII bus clock divider value * @regs_start: Resource start for axienet device addresses * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space @@ -419,11 +420,15 @@ struct axienet_local { struct phylink *phylink; struct phylink_config phylink_config; + /* Reference to PCS/PMA PHY if used */ + struct mdio_device *pcs_phy; + /* Clock for AXI bus */ struct clk *clk; /* MDIO bus data */ struct mii_bus *mii_bus; /* MII bus reference */ + u8 mii_clk_div; /* MII bus clock divider value */ /* IO registers, dma functions and IRQs */ resource_size_t regs_start; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 9aafd3ecdaa4..6fea980acf64 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1049,20 +1049,13 @@ static int axienet_open(struct net_device *ndev) dev_dbg(&ndev->dev, "axienet_open()\n"); - /* Disable the MDIO interface till Axi Ethernet Reset is completed. - * When we do an Axi Ethernet reset, it resets the complete core - * including the MDIO. MDIO must be disabled before resetting - * and re-enabled afterwards. + /* When we do an Axi Ethernet reset, it resets the complete core + * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. 
*/ mutex_lock(&lp->mii_bus->mdio_lock); - axienet_mdio_disable(lp); ret = axienet_device_reset(ndev); - if (ret == 0) - ret = axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); - if (ret < 0) - return ret; ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); if (ret) { @@ -1156,9 +1149,7 @@ static int axienet_stop(struct net_device *ndev) /* Do a reset to ensure DMA is really stopped */ mutex_lock(&lp->mii_bus->mdio_lock); - axienet_mdio_disable(lp); __axienet_device_reset(lp); - axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); cancel_work_sync(&lp->dma_err_task); @@ -1517,10 +1508,27 @@ static void axienet_validate(struct phylink_config *config, phylink_set(mask, Asym_Pause); phylink_set(mask, Pause); - phylink_set(mask, 1000baseX_Full); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Full); + + switch (state->interface) { + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 1000baseT_Full); + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + break; + fallthrough; + case PHY_INTERFACE_MODE_MII: + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 10baseT_Full); + default: + break; + } bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -1533,38 +1541,46 @@ static void axienet_mac_pcs_get_state(struct phylink_config *config, { struct net_device *ndev = to_net_dev(config->dev); struct axienet_local *lp = netdev_priv(ndev); - u32 emmc_reg, fcc_reg; - - state->interface = lp->phy_mode; - emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); - if (emmc_reg & XAE_EMMC_LINKSPD_1000) - state->speed = SPEED_1000; - else if (emmc_reg & XAE_EMMC_LINKSPD_100) - state->speed = SPEED_100; - else - state->speed = SPEED_10; - - state->pause = 0; - fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET); - if (fcc_reg & XAE_FCC_FCTX_MASK) - state->pause |= MLO_PAUSE_TX; - if (fcc_reg & XAE_FCC_FCRX_MASK) - state->pause |= MLO_PAUSE_RX; - - state->an_complete = 0; - state->duplex = 1; + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + phylink_mii_c22_pcs_get_state(lp->pcs_phy, state); + break; + default: + break; + } } static void axienet_mac_an_restart(struct phylink_config *config) { - /* Unsupported, do nothing */ + struct net_device *ndev = to_net_dev(config->dev); + struct axienet_local *lp = netdev_priv(ndev); + + phylink_mii_c22_pcs_an_restart(lp->pcs_phy); } static void axienet_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - /* nothing meaningful to do */ + struct net_device *ndev = to_net_dev(config->dev); + struct axienet_local *lp = netdev_priv(ndev); + int ret; + + switch (state->interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode, + state->interface, + state->advertising); + if (ret < 0) + netdev_warn(ndev, "Failed to configure PCS: %d\n", + ret); + break; + + default: + break; + } } static void axienet_mac_link_down(struct phylink_config *config, @@ -1644,16 +1660,12 @@ static void axienet_dma_err_handler(struct work_struct *work) axienet_setoptions(ndev, lp->options & ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); - /* Disable the MDIO 
interface till Axi Ethernet Reset is completed. - * When we do an Axi Ethernet reset, it resets the complete core - * including the MDIO. MDIO must be disabled before resetting - * and re-enabled afterwards. + /* When we do an Axi Ethernet reset, it resets the complete core + * including the MDIO. MDIO must be disabled before resetting. * Hold MDIO bus lock to avoid MDIO accesses during the reset. */ mutex_lock(&lp->mii_bus->mdio_lock); - axienet_mdio_disable(lp); __axienet_device_reset(lp); - axienet_mdio_enable(lp); mutex_unlock(&lp->mii_bus->mdio_lock); for (i = 0; i < lp->tx_bd_num; i++) { @@ -1999,6 +2011,20 @@ static int axienet_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "error registering MDIO bus: %d\n", ret); } + if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || + lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { + if (!lp->phy_node) { + dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n"); + ret = -EINVAL; + goto free_netdev; + } + lp->pcs_phy = of_mdio_find_device(lp->phy_node); + if (!lp->pcs_phy) { + ret = -EPROBE_DEFER; + goto free_netdev; + } + lp->phylink_config.pcs_poll = true; + } lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; @@ -2036,6 +2062,9 @@ static int axienet_remove(struct platform_device *pdev) if (lp->phylink) phylink_destroy(lp->phylink); + if (lp->pcs_phy) + put_device(&lp->pcs_phy->dev); + axienet_mdio_teardown(lp); clk_disable_unprepare(lp->clk); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index 435ed308d990..9c014cee34b2 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -30,6 +30,23 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp) 1, 20000); } +/* Enable the MDIO MDC. Called prior to a read/write operation */ +static void axienet_mdio_mdc_enable(struct axienet_local *lp) +{ + axienet_iow(lp, XAE_MDIO_MC_OFFSET, + ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK)); +} + +/* Disable the MDIO MDC. 
Called after a read/write operation */ +static void axienet_mdio_mdc_disable(struct axienet_local *lp) +{ + u32 mc_reg; + + mc_reg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); + axienet_iow(lp, XAE_MDIO_MC_OFFSET, + (mc_reg & ~XAE_MDIO_MC_MDIOEN_MASK)); +} + /** * axienet_mdio_read - MDIO interface read function * @bus: Pointer to mii bus structure @@ -48,9 +65,13 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg) int ret; struct axienet_local *lp = bus->priv; + axienet_mdio_mdc_enable(lp); + ret = axienet_mdio_wait_until_ready(lp); - if (ret < 0) + if (ret < 0) { + axienet_mdio_mdc_disable(lp); return ret; + } axienet_iow(lp, XAE_MDIO_MCR_OFFSET, (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & @@ -61,14 +82,17 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg) XAE_MDIO_MCR_OP_READ_MASK)); ret = axienet_mdio_wait_until_ready(lp); - if (ret < 0) + if (ret < 0) { + axienet_mdio_mdc_disable(lp); return ret; + } rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF; dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n", phy_id, reg, rc); + axienet_mdio_mdc_disable(lp); return rc; } @@ -94,9 +118,13 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n", phy_id, reg, val); + axienet_mdio_mdc_enable(lp); + ret = axienet_mdio_wait_until_ready(lp); - if (ret < 0) + if (ret < 0) { + axienet_mdio_mdc_disable(lp); return ret; + } axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val); axienet_iow(lp, XAE_MDIO_MCR_OFFSET, @@ -108,8 +136,11 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, XAE_MDIO_MCR_OP_WRITE_MASK)); ret = axienet_mdio_wait_until_ready(lp); - if (ret < 0) + if (ret < 0) { + axienet_mdio_mdc_disable(lp); return ret; + } + axienet_mdio_mdc_disable(lp); return 0; } @@ -124,7 +155,9 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, **/ int axienet_mdio_enable(struct axienet_local *lp) { - u32 clk_div, host_clock; + u32 host_clock; + + lp->mii_clk_div = 0; if (lp->clk) { host_clock = clk_get_rate(lp->clk); @@ -176,19 +209,19 @@ int axienet_mdio_enable(struct axienet_local *lp) * "clock-frequency" from the CPU */ - clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1; + lp->mii_clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1; /* If there is any remainder from the division of * fHOST / (MAX_MDIO_FREQ * 2), then we need to add * 1 to the clock divisor or we will surely be above 2.5 MHz */ if (host_clock % (MAX_MDIO_FREQ * 2)) - clk_div++; + lp->mii_clk_div++; netdev_dbg(lp->ndev, "Setting MDIO clock divisor to %u/%u Hz host clock.\n", - clk_div, host_clock); + lp->mii_clk_div, host_clock); - axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK); + axienet_iow(lp, XAE_MDIO_MC_OFFSET, lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK); return axienet_mdio_wait_until_ready(lp); } @@ -211,8 +244,8 @@ void axienet_mdio_disable(struct axienet_local *lp) * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when * mdiobus_alloc (to allocate memory for mii bus structure) fails. * - * Sets up the MDIO interface by initializing the MDIO clock and enabling the - * MDIO interface in hardware. Register the MDIO interface. + * Sets up the MDIO interface by initializing the MDIO clock. + * Registers the MDIO interface.
**/ int axienet_mdio_setup(struct axienet_local *lp) { @@ -246,6 +279,7 @@ int axienet_mdio_setup(struct axienet_local *lp) lp->mii_bus = NULL; return ret; } + axienet_mdio_mdc_disable(lp); return 0; } diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 0c26f5bcc523..008b9a40faad 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -97,7 +97,7 @@ #define ALIGNMENT 4 /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ -#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT) +#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT) #ifdef __BIG_ENDIAN #define xemaclite_readl ioread32be @@ -338,7 +338,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * if it is configured in HW */ - addr = (void __iomem __force *)((u32 __force)addr ^ + addr = (void __iomem __force *)((uintptr_t __force)addr ^ XEL_BUFFER_OFFSET); reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); @@ -399,8 +399,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) * will correct on subsequent calls */ if (drvdata->rx_ping_pong != 0) - addr = (void __iomem __force *)((u32 __force)addr ^ - XEL_BUFFER_OFFSET); + addr = (void __iomem __force *) + ((uintptr_t __force)addr ^ + XEL_BUFFER_OFFSET); else return 0; /* No data was available */ @@ -518,6 +519,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address) /** * xemaclite_tx_timeout - Callback for Tx Timeout * @dev: Pointer to the network device + * @txqueue: Unused * * This function is called when Tx time out occurs for Emaclite device. */ @@ -1191,9 +1193,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev) } dev_info(dev, - "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n", + "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n", (unsigned int __force)ndev->mem_start, - (unsigned int __force)lp->base_addr, ndev->irq); + (unsigned long __force)lp->base_addr, ndev->irq); return 0; error: diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c index cc9ac572423e..e9b9614639cd 100644 --- a/drivers/net/fddi/skfp/drvfbi.c +++ b/drivers/net/fddi/skfp/drvfbi.c @@ -22,10 +22,6 @@ #include <linux/bitrev.h> #include <linux/pci.h> -#ifndef lint -static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ; -#endif - /* * PCM active state */ diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c index 15c503f43727..2f5f5f26bb43 100644 --- a/drivers/net/fddi/skfp/ecm.c +++ b/drivers/net/fddi/skfp/ecm.c @@ -40,10 +40,6 @@ #define KERNEL #include "h/smtstate.h" -#ifndef lint -static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ; -#endif - /* * FSM Macros */ @@ -147,10 +143,11 @@ static void ecm_fsm(struct s_smc *smc, int cmd) /* For AIX event notification: */ /* Is a disconnect command remotely issued ? 
*/ if (cmd == EC_DISCONNECT && - smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) + smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) { AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long) FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc), smt_get_error_word(smc) ); + } /*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/ if (cmd == EC_CONNECT) { diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c index afd5ca39f43b..35110c0c00a0 100644 --- a/drivers/net/fddi/skfp/ess.c +++ b/drivers/net/fddi/skfp/ess.c @@ -40,7 +40,6 @@ #ifdef ESS #ifndef lint -static const char ID_sccs[] = "@(#)ess.c 1.10 96/02/23 (C) SK" ; #define LINT_USE(x) #else #define LINT_USE(x) (x)=(x) diff --git a/drivers/net/fddi/skfp/hwt.c b/drivers/net/fddi/skfp/hwt.c index 32804ed049cd..5577b8e14b73 100644 --- a/drivers/net/fddi/skfp/hwt.c +++ b/drivers/net/fddi/skfp/hwt.c @@ -27,10 +27,6 @@ #include "h/fddi.h" #include "h/smc.h" -#ifndef lint -static const char ID_sccs[] = "@(#)hwt.c 1.13 97/04/23 (C) SK " ; -#endif - /* * Prototypes of local functions. */ diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 554cde8d6073..90e8df6d9a88 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -45,10 +45,6 @@ #define KERNEL #include "h/smtstate.h" -#ifndef lint -static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ; -#endif - #ifdef FDDI_MIB extern int snmp_fddi_trap( #ifdef ANSIC diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 14f10b4cab0f..563fb7f0b327 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c @@ -24,10 +24,6 @@ #ifndef SLIM_SMT -#ifndef lint -static const char ID_sccs[] = "@(#)pmf.c 1.37 97/08/04 (C) SK " ; -#endif - static int smt_authorize(struct s_smc *smc, struct smt_header *sm); static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm); static const struct s_p_tab* smt_get_ptab(u_short para); diff --git a/drivers/net/fddi/skfp/queue.c b/drivers/net/fddi/skfp/queue.c index ba022f723bd7..abe155ad777f 100644 --- a/drivers/net/fddi/skfp/queue.c +++ b/drivers/net/fddi/skfp/queue.c @@ -18,10 +18,6 @@ #include "h/fddi.h" #include "h/smc.h" -#ifndef lint -static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ; -#endif - #define PRINTF(a,b,c) /* diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c index c0e62c25332c..37a89675dbeb 100644 --- a/drivers/net/fddi/skfp/rmt.c +++ b/drivers/net/fddi/skfp/rmt.c @@ -45,10 +45,6 @@ #define KERNEL #include "h/smtstate.h" -#ifndef lint -static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ; -#endif - /* * FSM Macros */ diff --git a/drivers/net/fddi/skfp/smtdef.c b/drivers/net/fddi/skfp/smtdef.c index 0bebde3c6cb9..99cc9a549bd7 100644 --- a/drivers/net/fddi/skfp/smtdef.c +++ b/drivers/net/fddi/skfp/smtdef.c @@ -22,10 +22,6 @@ #define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata" #endif -#ifndef lint -static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ; -#endif - /* * defaults */ diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c index 01f6c75cbea8..c9898c83fe30 100644 --- a/drivers/net/fddi/skfp/smtinit.c +++ b/drivers/net/fddi/skfp/smtinit.c @@ -19,10 +19,6 @@ #include "h/fddi.h" #include "h/smc.h" -#ifndef lint -static const char ID_sccs[] = "@(#)smtinit.c 1.15 97/05/06 (C) SK " ; -#endif - void init_fddi_driver(struct s_smc *smc, u_char *mac_addr); /* define global debug variable */ diff --git a/drivers/net/fddi/skfp/smttimer.c 
b/drivers/net/fddi/skfp/smttimer.c index 9d549bb14f07..5f3e5d7bf415 100644 --- a/drivers/net/fddi/skfp/smttimer.c +++ b/drivers/net/fddi/skfp/smttimer.c @@ -18,10 +18,6 @@ #include "h/fddi.h" #include "h/smc.h" -#ifndef lint -static const char ID_sccs[] = "@(#)smttimer.c 2.4 97/08/04 (C) SK " ; -#endif - static void timer_done(struct s_smc *smc, int restart); void smt_timer_init(struct s_smc *smc) diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c index f98d060b0f5b..4cad68c3f49b 100644 --- a/drivers/net/fddi/skfp/srf.c +++ b/drivers/net/fddi/skfp/srf.c @@ -26,11 +26,6 @@ #ifndef SLIM_SMT #ifndef BOOT -#ifndef lint -static const char ID_sccs[] = "@(#)srf.c 1.18 97/08/04 (C) SK " ; -#endif - - /* * function declarations */ diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 1426bfc009bc..5523f069b9a5 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/etherdevice.h> @@ -1137,7 +1138,7 @@ static const struct net_device_ops geneve_netdev_ops = { .ndo_open = geneve_open, .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = geneve_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index dc668ed280b9..4c04e271f184 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -607,7 +607,7 @@ static const struct net_device_ops gtp_netdev_ops = { .ndo_init = gtp_dev_init, .ndo_uninit = gtp_dev_uninit, .ndo_start_xmit = gtp_dev_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, }; static void gtp_link_setup(struct net_device *dev) diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index e7413a643929..9e0058154ac3 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -597,7 +597,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case HDLCDRVCTL_DRIVERNAME: if (s->ops && s->ops->drvname) { - strncpy(bi.data.drivername, s->ops->drvname, + strlcpy(bi.data.drivername, s->ops->drvname, sizeof(bi.data.drivername)); break; } diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0c3de94b5178..fa8341f8359a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1499,7 +1499,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, u64_stats_init(&nvchan->tx_stats.syncp); u64_stats_init(&nvchan->rx_stats.syncp); - ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i); + ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0); if (ret) { netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 261e6e55a907..d17bbc75f5e7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/atomic.h> +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/highmem.h> #include <linux/device.h> diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index b22e47bcfeca..2c2b55c32a7a 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -6,6 +6,7 @@ * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> */ 
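The geneve and gtp hunks above switch .ndo_get_stats64 to the generic dev_get_tstats64() helper, which folds the driver's per-CPU dev->tstats counters instead of calling a tunnel-specific routine. The folding it performs looks roughly like the sketch below; this is a minimal illustration of the per-CPU u64_stats pattern of this kernel era, not the upstream implementation, and fold_tstats() is a hypothetical name.

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

/* Illustrative only: fold per-CPU pcpu_sw_netstats into rtnl_link_stats64 */
static void fold_tstats(const struct net_device *dev,
			struct rtnl_link_stats64 *s)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *st = per_cpu_ptr(dev->tstats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			/* retry loop gives a consistent 64-bit snapshot on 32-bit SMP */
			start = u64_stats_fetch_begin_irq(&st->syncp);
			rx_packets = st->rx_packets;
			rx_bytes = st->rx_bytes;
			tx_packets = st->tx_packets;
			tx_bytes = st->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&st->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes += rx_bytes;
		s->tx_packets += tx_packets;
		s->tx_bytes += tx_bytes;
	}
}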
+#include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 4eb64709d44c..3a2824f24caa 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -316,6 +316,7 @@ struct cas_control { * struct ca8210_test - ca8210 test interface structure * @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device * @up_fifo: fifo for upstream messages + * @readq: read wait queue * * This structure stores all the data pertaining to the debug interface */ @@ -346,12 +347,12 @@ struct ca8210_test { * @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms * @sync_down: counts number of downstream synchronous commands * @sync_up: counts number of upstream synchronous commands - * @spi_transfer_complete completion object for a single spi_transfer - * @sync_exchange_complete completion object for a complete synchronous API - * exchange - * @promiscuous whether the ca8210 is in promiscuous mode or not + * @spi_transfer_complete: completion object for a single spi_transfer + * @sync_exchange_complete: completion object for a complete synchronous API + * exchange + * @promiscuous: whether the ca8210 is in promiscuous mode or not * @retries: records how many times the current pending spi - * transfer has been retried + * transfer has been retried */ struct ca8210_priv { struct spi_device *spi; @@ -420,8 +421,8 @@ struct fulladdr { /** * union macaddr: generic MAC address container - * @short_addr: 16-bit short address - * @ieee_address: 64-bit extended address as LE byte array + * @short_address: 16-bit short address + * @ieee_address: 64-bit extended address as LE byte array * */ union macaddr { @@ -714,7 +715,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work) /** * ca8210_rx_done() - Calls various message dispatches responding to a received * command - * @arg: Pointer to the cas_control object for the relevant spi transfer + * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer * * Presents a received SAP command from the ca8210 to the Cascoda EVBME, test * interface and network driver. 
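The ca8210 comment fixes above are about kernel-doc syntax: each documented struct or union member needs the form "@name:" with a trailing colon, and documented parameter names must match the actual signature. A minimal, hypothetical example of the well-formed shape those hunks converge on:

#include <linux/types.h>

/**
 * struct foo_counters - hypothetical example of well-formed kernel-doc
 * @tx_done: completed transmit frames
 * @rx_drop: receive frames dropped for lack of buffers
 */
struct foo_counters {
	u64 tx_done;
	u64 rx_drop;
};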
@@ -1277,7 +1278,6 @@ static u8 tdme_channelinit(u8 channel, void *device_ref) * @pib_attribute: Attribute Number * @pib_attribute_length: Attribute length * @pib_attribute_value: Pointer to Attribute Value - * @device_ref: Nondescript pointer to target device * * Return: 802.15.4 status code of checks */ @@ -3046,7 +3046,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv) /** * ca8210_remove() - Shut down a ca8210 upon being disconnected - * @priv: Pointer to private data structure + * @spi_device: Pointer to spi device data structure * * Return: 0 or linux error code */ @@ -3096,7 +3096,7 @@ static int ca8210_remove(struct spi_device *spi_device) /** * ca8210_probe() - Set up a connected ca8210 upon being detected by the system - * @priv: Pointer to private data structure + * @spi_device: Pointer to spi device data structure * * Return: 0 or linux error code */ diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 7fe306e76281..fa63d4dee0ba 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -187,8 +187,7 @@ static const struct net_device_ops ifb_netdev_ops = { }; #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \ - NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \ - NETIF_F_GSO_ENCAP_ALL | \ + NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_TX) diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 6bfac1efe037..c4795249719d 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -21,6 +21,7 @@ #include "gsi_trans.h" #include "ipa_gsi.h" #include "ipa_data.h" +#include "ipa_version.h" /** * DOC: The IPA Generic Software Interface @@ -91,6 +92,7 @@ #define GSI_CMD_TIMEOUT 5 /* seconds */ #define GSI_CHANNEL_STOP_RX_RETRIES 10 +#define GSI_CHANNEL_MODEM_HALT_RETRIES 10 #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ @@ -108,62 +110,6 @@ struct gsi_event { u8 chid; }; -/* Hardware values from the error log register error code field */ -enum gsi_err_code { - GSI_INVALID_TRE_ERR = 0x1, - GSI_OUT_OF_BUFFERS_ERR = 0x2, - GSI_OUT_OF_RESOURCES_ERR = 0x3, - GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4, - GSI_EVT_RING_EMPTY_ERR = 0x5, - GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6, - GSI_HWO_1_ERR = 0x8, -}; - -/* Hardware values from the error log register error type field */ -enum gsi_err_type { - GSI_ERR_TYPE_GLOB = 0x1, - GSI_ERR_TYPE_CHAN = 0x2, - GSI_ERR_TYPE_EVT = 0x3, -}; - -/* Hardware values used when programming an event ring */ -enum gsi_evt_chtype { - GSI_EVT_CHTYPE_MHI_EV = 0x0, - GSI_EVT_CHTYPE_XHCI_EV = 0x1, - GSI_EVT_CHTYPE_GPI_EV = 0x2, - GSI_EVT_CHTYPE_XDCI_EV = 0x3, -}; - -/* Hardware values used when programming a channel */ -enum gsi_channel_protocol { - GSI_CHANNEL_PROTOCOL_MHI = 0x0, - GSI_CHANNEL_PROTOCOL_XHCI = 0x1, - GSI_CHANNEL_PROTOCOL_GPI = 0x2, - GSI_CHANNEL_PROTOCOL_XDCI = 0x3, -}; - -/* Hardware values representing an event ring immediate command opcode */ -enum gsi_evt_cmd_opcode { - GSI_EVT_ALLOCATE = 0x0, - GSI_EVT_RESET = 0x9, - GSI_EVT_DE_ALLOC = 0xa, -}; - -/* Hardware values representing a generic immediate command opcode */ -enum gsi_generic_cmd_opcode { - GSI_GENERIC_HALT_CHANNEL = 0x1, - GSI_GENERIC_ALLOCATE_CHANNEL = 0x2, -}; - -/* Hardware values representing a channel immediate command opcode */ -enum gsi_ch_cmd_opcode { - GSI_CH_ALLOCATE = 0x0, - GSI_CH_START = 0x1, - GSI_CH_STOP = 0x2, - GSI_CH_RESET = 0x9, - GSI_CH_DE_ALLOC = 0xa, -}; - /** 
gsi_channel_scratch_gpi - GPI protocol scratch register * @max_outstanding_tre: * Defines the maximum number of TREs allowed in a single transaction @@ -229,21 +175,76 @@ static u32 gsi_channel_id(struct gsi_channel *channel) return channel - &channel->gsi->channel[0]; } +/* Update the GSI IRQ type register with the cached value */ +static void gsi_irq_type_update(struct gsi *gsi, u32 val) +{ + gsi->type_enabled_bitmap = val; + iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); +} + +static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) +{ + gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id)); +} + +static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) +{ + gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); +} + +/* Turn off all GSI interrupts initially */ +static void gsi_irq_setup(struct gsi *gsi) +{ + u32 adjust; + + /* Disable all interrupt types */ + gsi_irq_type_update(gsi, 0); + + /* Clear all type-specific interrupt masks */ + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); + + /* Reverse the offset adjustment for inter-EE register offsets */ + adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST; + iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET); + iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET); + + iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); +} + +/* Turn off all GSI interrupts when we're all done */ +static void gsi_irq_teardown(struct gsi *gsi) +{ + /* Nothing to do */ +} + static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id) { + bool enable_ieob = !gsi->ieob_enabled_bitmap; u32 val; - gsi->event_enable_bitmap |= BIT(evt_ring_id); - val = gsi->event_enable_bitmap; + gsi->ieob_enabled_bitmap |= BIT(evt_ring_id); + val = gsi->ieob_enabled_bitmap; iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); + + /* Enable the interrupt type if this is the first channel enabled */ + if (enable_ieob) + gsi_irq_type_enable(gsi, GSI_IEOB); } static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id) { u32 val; - gsi->event_enable_bitmap &= ~BIT(evt_ring_id); - val = gsi->event_enable_bitmap; + gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id); + + /* Disable the interrupt type if this was the last enabled channel */ + if (!gsi->ieob_enabled_bitmap) + gsi_irq_type_disable(gsi, GSI_IEOB); + + val = gsi->ieob_enabled_bitmap; iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); } @@ -252,38 +253,32 @@ static void gsi_irq_enable(struct gsi *gsi) { u32 val; - /* We don't use inter-EE channel or event interrupts */ - val = GSI_CNTXT_TYPE_IRQ_MSK_ALL; - val &= ~INTER_EE_CH_CTRL_FMASK; - val &= ~INTER_EE_EV_CTRL_FMASK; - iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); - - val = GENMASK(gsi->channel_count - 1, 0); - iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); - - val = GENMASK(gsi->evt_ring_count - 1, 0); - iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); - - /* Each IEOB interrupt is enabled (later) as needed by channels */ - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); - - val = GSI_CNTXT_GLOB_IRQ_ALL; - iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + /* Global interrupts include hardware error reports. 
Enable + * that so we can at least report the error should it occur. + */ + iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE)); - /* Never enable GSI_BREAK_POINT */ - val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK; + /* General GSI interrupts are reported to all EEs; if they occur + * they are unrecoverable (without reset). A breakpoint interrupt + * also exists, but we don't support that. We want to be notified + * of errors so we can report them, even if they can't be handled. + */ + val = BIT(BUS_ERROR); + val |= BIT(CMD_FIFO_OVRFLOW); + val |= BIT(MCS_STACK_OVRFLOW); iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); + gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL)); } -/* Disable all GSI_interrupt types */ +/* Disable all GSI interrupt types */ static void gsi_irq_disable(struct gsi *gsi) { + gsi_irq_type_update(gsi, 0); + + /* Clear the type-specific interrupt masks set by gsi_irq_enable() */ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET); } /* Return the virtual address associated with a ring index */ @@ -337,13 +332,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id, struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; struct completion *completion = &evt_ring->completion; struct device *dev = gsi->dev; + bool success; u32 val; + /* We only perform one event ring command at a time, and event + * control interrupts should only occur when such a command + * is issued here. Only permit *this* event ring to trigger + * an interrupt, and only enable the event control IRQ type + * when we expect it to occur. + */ + val = BIT(evt_ring_id); + iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + gsi_irq_type_enable(gsi, GSI_EV_CTRL); + val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); val |= u32_encode_bits(opcode, EV_OPCODE_FMASK); - if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion)) - return 0; /* Success! 
*/ + success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion); + + /* Disable the interrupt again */ + gsi_irq_type_disable(gsi, GSI_EV_CTRL); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + + if (success) + return 0; dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", opcode, evt_ring_id, evt_ring->state); @@ -360,15 +372,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) /* Get initial event ring state */ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { - dev_err(gsi->dev, "bad event ring state %u before alloc\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u bad state %u before alloc\n", + evt_ring_id, evt_ring->state); return -EINVAL; } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad event ring state %u after alloc\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u bad state %u after alloc\n", + evt_ring_id, evt_ring->state); ret = -EIO; } @@ -384,15 +396,15 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) if (state != GSI_EVT_RING_STATE_ALLOCATED && state != GSI_EVT_RING_STATE_ERROR) { - dev_err(gsi->dev, "bad event ring state %u before reset\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u bad state %u before reset\n", + evt_ring_id, evt_ring->state); return; } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) - dev_err(gsi->dev, "bad event ring state %u after reset\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u bad state %u after reset\n", + evt_ring_id, evt_ring->state); } /* Issue a hardware de-allocation request for an allocated event ring */ @@ -402,15 +414,15 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) int ret; if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad event ring state %u before dealloc\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u state %u before dealloc\n", + evt_ring_id, evt_ring->state); return; } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) - dev_err(gsi->dev, "bad event ring state %u after dealloc\n", - evt_ring->state); + dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n", + evt_ring_id, evt_ring->state); } /* Fetch the current state of a channel from hardware */ @@ -433,13 +445,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) u32 channel_id = gsi_channel_id(channel); struct gsi *gsi = channel->gsi; struct device *dev = gsi->dev; + bool success; u32 val; + /* We only perform one channel command at a time, and channel + * control interrupts should only occur when such a command is + * issued here. So we only permit *this* channel to trigger + * an interrupt and only enable the channel control IRQ type + * when we expect it to occur. + */ + val = BIT(channel_id); + iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + gsi_irq_type_enable(gsi, GSI_CH_CTRL); + val = u32_encode_bits(channel_id, CH_CHID_FMASK); val |= u32_encode_bits(opcode, CH_OPCODE_FMASK); + success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion); - if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion)) - return 0; /* Success! 
*/ + /* Disable the interrupt again */ + gsi_irq_type_disable(gsi, GSI_CH_CTRL); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + + if (success) + return 0; dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", opcode, channel_id, gsi_channel_state(channel)); @@ -458,7 +486,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) /* Get initial channel state */ state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) { - dev_err(dev, "bad channel state %u before alloc\n", state); + dev_err(dev, "channel %u bad state %u before alloc\n", + channel_id, state); return -EINVAL; } @@ -467,7 +496,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) { - dev_err(dev, "bad channel state %u after alloc\n", state); + dev_err(dev, "channel %u bad state %u after alloc\n", + channel_id, state); ret = -EIO; } @@ -484,7 +514,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel) state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_ALLOCATED && state != GSI_CHANNEL_STATE_STOPPED) { - dev_err(dev, "bad channel state %u before start\n", state); + dev_err(dev, "channel %u bad state %u before start\n", + gsi_channel_id(channel), state); return -EINVAL; } @@ -493,7 +524,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_STARTED) { - dev_err(dev, "bad channel state %u after start\n", state); + dev_err(dev, "channel %u bad state %u after start\n", + gsi_channel_id(channel), state); ret = -EIO; } @@ -517,7 +549,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) if (state != GSI_CHANNEL_STATE_STARTED && state != GSI_CHANNEL_STATE_STOP_IN_PROC) { - dev_err(dev, "bad channel state %u before stop\n", state); + dev_err(dev, "channel %u bad state %u before stop\n", + gsi_channel_id(channel), state); return -EINVAL; } @@ -532,7 +565,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) return -EAGAIN; - dev_err(dev, "bad channel state %u after stop\n", state); + dev_err(dev, "channel %u bad state %u after stop\n", + gsi_channel_id(channel), state); return -EIO; } @@ -549,7 +583,10 @@ static void gsi_channel_reset_command(struct gsi_channel *channel) state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_STOPPED && state != GSI_CHANNEL_STATE_ERROR) { - dev_err(dev, "bad channel state %u before reset\n", state); + /* No need to reset a channel already in ALLOCATED state */ + if (state != GSI_CHANNEL_STATE_ALLOCATED) + dev_err(dev, "channel %u bad state %u before reset\n", + gsi_channel_id(channel), state); return; } @@ -558,7 +595,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) - dev_err(dev, "bad channel state %u after reset\n", state); + dev_err(dev, "channel %u bad state %u after reset\n", + gsi_channel_id(channel), state); } /* Deallocate an ALLOCATED GSI channel */ @@ -571,7 +609,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_ALLOCATED) { - dev_err(dev, "bad channel state 
%u before dealloc\n", state); + dev_err(dev, "channel %u bad state %u before dealloc\n", + channel_id, state); return; } @@ -580,7 +619,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED) - dev_err(dev, "bad channel state %u after dealloc\n", state); + dev_err(dev, "channel %u bad state %u after dealloc\n", + channel_id, state); } /* Ring an event ring doorbell, reporting the last entry processed by the AP. @@ -607,7 +647,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE; u32 val; - val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK); + /* We program all event rings as GPI type/protocol */ + val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK); val |= EV_INTYPE_FMASK; val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id)); @@ -714,8 +755,8 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* Arbitrarily pick TRE 0 as the first channel element to use */ channel->tre_ring.index = 0; - /* We program all channels to use GPI protocol */ - val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK); + /* We program all channels as GPI type/protocol */ + val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK); if (channel->toward_ipa) val |= CHTYPE_DIR_FMASK; val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK); @@ -742,12 +783,21 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ - /* Enable the doorbell engine if requested */ - if (doorbell) + /* We enable the doorbell engine for IPA v3.5.1 */ + if (gsi->version == IPA_VERSION_3_5_1 && doorbell) val |= USE_DB_ENG_FMASK; - if (!channel->use_prefetch) - val |= USE_ESCAPE_BUF_ONLY_FMASK; + /* v4.0 introduces an escape buffer for prefetch. We use it + * on all but the AP command channel. + */ + if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) { + /* If not otherwise set, prefetch buffers are used */ + if (gsi->version < IPA_VERSION_4_5) + val |= USE_ESCAPE_BUF_ONLY_FMASK; + else + val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY, + PREFETCH_MODE_FMASK); + } iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id)); @@ -829,8 +879,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id) return ret; } -/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */ -void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy) +/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */ +void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) { struct gsi_channel *channel = &gsi->channel[channel_id]; @@ -838,10 +888,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy) gsi_channel_reset_command(channel); /* Due to a hardware quirk we may need to reset RX channels twice. 
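 * (Only IPA v3.5.1 RX channels need the second reset, per the version check just below.)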
*/ - if (legacy && !channel->toward_ipa) + if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa) gsi_channel_reset_command(channel); - gsi_channel_program(channel, legacy); + gsi_channel_program(channel, doorbell); gsi_channel_trans_cancel_pending(channel); mutex_unlock(&gsi->mutex); @@ -989,7 +1039,7 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi) static void gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) { - if (code == GSI_OUT_OF_RESOURCES_ERR) { + if (code == GSI_OUT_OF_RESOURCES) { dev_err(gsi->dev, "channel %u out of resources\n", channel_id); complete(&gsi->channel[channel_id].completion); return; @@ -1004,7 +1054,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) static void gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) { - if (code == GSI_OUT_OF_RESOURCES_ERR) { + if (code == GSI_OUT_OF_RESOURCES) { struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; u32 channel_id = gsi_channel_id(evt_ring->channel); @@ -1034,8 +1084,8 @@ static void gsi_isr_glob_err(struct gsi *gsi) iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET); ee = u32_get_bits(val, ERR_EE_FMASK); - which = u32_get_bits(val, ERR_VIRT_IDX_FMASK); type = u32_get_bits(val, ERR_TYPE_FMASK); + which = u32_get_bits(val, ERR_VIRT_IDX_FMASK); code = u32_get_bits(val, ERR_CODE_FMASK); if (type == GSI_ERR_TYPE_CHAN) @@ -1052,10 +1102,38 @@ static void gsi_isr_gp_int1(struct gsi *gsi) u32 result; u32 val; + /* This interrupt is used to handle completions of the two GENERIC + * GSI commands. We use these to allocate and halt channels on + * the modem's behalf due to a hardware quirk on IPA v4.2. Once + * allocated, the modem "owns" these channels, and as a result we + * have no way of knowing the channel's state at any given time. + * + * It is recommended that we halt the modem channels we allocated + * when shutting down, but it's possible the channel isn't running + * at the time we issue the HALT command. We'll get an error in + * that case, but it's harmless (the channel is already halted). + * + * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error + * if we receive it. 
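+ * (A RETRY result is mapped to -EAGAIN below; gsi_modem_channel_halt() re-issues the command a bounded number of times in that case.)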
+ */ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK); - if (result != GENERIC_EE_SUCCESS_FVAL) + + switch (result) { + case GENERIC_EE_SUCCESS: + case GENERIC_EE_CHANNEL_NOT_RUNNING: + gsi->result = 0; + break; + + case GENERIC_EE_RETRY: + gsi->result = -EAGAIN; + break; + + default: dev_err(gsi->dev, "global INT1 generic result %u\n", result); + gsi->result = -EIO; + break; + } complete(&gsi->completion); } @@ -1067,15 +1145,15 @@ static void gsi_isr_glob_ee(struct gsi *gsi) val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET); - if (val & ERROR_INT_FMASK) + if (val & BIT(ERROR_INT)) gsi_isr_glob_err(gsi); iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET); - val &= ~ERROR_INT_FMASK; + val &= ~BIT(ERROR_INT); - if (val & GP_INT1_FMASK) { - val ^= GP_INT1_FMASK; + if (val & BIT(GP_INT1)) { + val ^= BIT(GP_INT1); gsi_isr_gp_int1(gsi); } @@ -1110,8 +1188,7 @@ static void gsi_isr_general(struct gsi *gsi) val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET); iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET); - if (val) - dev_err(dev, "unexpected general interrupt 0x%08x\n", val); + dev_err(dev, "unexpected general interrupt 0x%08x\n", val); } /** @@ -1128,6 +1205,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) u32 intr_mask; u32 cnt = 0; + /* enum gsi_irq_type_id defines GSI interrupt types */ while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) { /* intr_mask contains bitmask of pending GSI interrupts */ do { @@ -1136,19 +1214,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) intr_mask ^= gsi_intr; switch (gsi_intr) { - case CH_CTRL_FMASK: + case BIT(GSI_CH_CTRL): gsi_isr_chan_ctrl(gsi); break; - case EV_CTRL_FMASK: + case BIT(GSI_EV_CTRL): gsi_isr_evt_ctrl(gsi); break; - case GLOB_EE_FMASK: + case BIT(GSI_GLOB_EE): gsi_isr_glob_ee(gsi); break; - case IEOB_FMASK: + case BIT(GSI_IEOB): gsi_isr_ieob(gsi); break; - case GENERAL_FMASK: + case BIT(GSI_GENERAL): gsi_isr_general(gsi); break; default: @@ -1168,6 +1246,34 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned int irq; + int ret; + + ret = platform_get_irq_byname(pdev, "gsi"); + if (ret <= 0) { + dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); + return ret ? 
: -EINVAL; + } + irq = ret; + + ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); + if (ret) { + dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); + return ret; + } + gsi->irq = irq; + + return 0; +} + +static void gsi_irq_exit(struct gsi *gsi) +{ + free_irq(gsi->irq, gsi); +} + /* Return the transaction associated with a transfer completion event */ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, struct gsi_event *event) @@ -1452,8 +1558,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi) } /* Setup function for a single channel */ -static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id, - bool legacy) +static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; u32 evt_ring_id = channel->evt_ring_id; @@ -1472,7 +1577,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id, if (ret) goto err_evt_ring_de_alloc; - gsi_channel_program(channel, legacy); + gsi_channel_program(channel, true); if (channel->toward_ipa) netif_tx_napi_add(&gsi->dummy_dev, &channel->napi, @@ -1511,8 +1616,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, enum gsi_generic_cmd_opcode opcode) { struct completion *completion = &gsi->completion; + bool success; u32 val; + /* The error global interrupt type is always enabled (until we + * teardown), so we won't change that. A generic EE command + * completes with a GSI global interrupt of type GP_INT1. We + * only perform one generic command at a time (to allocate or + * halt a modem channel) and only from this function. So we + * enable the GP_INT1 IRQ type here while we're expecting it. + */ + val = BIT(ERROR_INT) | BIT(GP_INT1); + iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + /* First zero the result code field */ val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); val &= ~GENERIC_EE_RESULT_FMASK; @@ -1523,8 +1639,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id, val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); - if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion)) - return 0; /* Success! 
*/ + success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion); + + /* Disable the GP_INT1 IRQ type again */ + iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + + if (success) + return gsi->result; dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", opcode, channel_id); @@ -1540,16 +1661,21 @@ static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) { + u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES; int ret; - ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL); + do + ret = gsi_generic_command(gsi, channel_id, + GSI_GENERIC_HALT_CHANNEL); + while (ret == -EAGAIN && retries--); + if (ret) dev_err(gsi->dev, "error %d halting modem channel %u\n", ret, channel_id); } /* Setup function for channels */ -static int gsi_channel_setup(struct gsi *gsi, bool legacy) +static int gsi_channel_setup(struct gsi *gsi) { u32 channel_id = 0; u32 mask; @@ -1561,7 +1687,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy) mutex_lock(&gsi->mutex); do { - ret = gsi_channel_setup_one(gsi, channel_id, legacy); + ret = gsi_channel_setup_one(gsi, channel_id); if (ret) goto err_unwind; } while (++channel_id < gsi->channel_count); @@ -1647,10 +1773,11 @@ static void gsi_channel_teardown(struct gsi *gsi) } /* Setup function for GSI. GSI firmware must be loaded and initialized */ -int gsi_setup(struct gsi *gsi, bool legacy) +int gsi_setup(struct gsi *gsi) { struct device *dev = gsi->dev; u32 val; + int ret; /* Here is where we first touch the GSI hardware */ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); @@ -1659,6 +1786,8 @@ int gsi_setup(struct gsi *gsi, bool legacy) return -EIO; } + gsi_irq_setup(gsi); + val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); @@ -1691,13 +1820,18 @@ int gsi_setup(struct gsi *gsi, bool legacy) /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); - return gsi_channel_setup(gsi, legacy); + ret = gsi_channel_setup(gsi); + if (ret) + gsi_irq_teardown(gsi); + + return ret; } /* Inverse of gsi_setup() */ void gsi_teardown(struct gsi *gsi) { gsi_channel_teardown(gsi); + gsi_irq_teardown(gsi); } /* Initialize a channel's event ring */ @@ -1745,7 +1879,7 @@ static void gsi_evt_ring_init(struct gsi *gsi) u32 evt_ring_id = 0; gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); - gsi->event_enable_bitmap = 0; + gsi->ieob_enabled_bitmap = 0; do init_completion(&gsi->evt_ring[evt_ring_id].completion); while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX); @@ -1814,7 +1948,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi, /* Init function for a single channel */ static int gsi_channel_init_one(struct gsi *gsi, const struct ipa_gsi_endpoint_data *data, - bool command, bool prefetch) + bool command) { struct gsi_channel *channel; u32 tre_count; @@ -1838,7 +1972,6 @@ static int gsi_channel_init_one(struct gsi *gsi, channel->gsi = gsi; channel->toward_ipa = data->toward_ipa; channel->command = command; - channel->use_prefetch = command && prefetch; channel->tlv_count = data->channel.tlv_count; channel->tre_count = tre_count; channel->event_count = data->channel.event_count; @@ -1892,13 +2025,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel) } /* Init function for channels */ -static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count, - const struct ipa_gsi_endpoint_data *data, - bool 
modem_alloc) +static int gsi_channel_init(struct gsi *gsi, u32 count, + const struct ipa_gsi_endpoint_data *data) { + bool modem_alloc; int ret = 0; u32 i; + /* IPA v4.2 requires the AP to allocate channels for the modem */ + modem_alloc = gsi->version == IPA_VERSION_4_2; + gsi_evt_ring_init(gsi); /* The endpoint data array is indexed by endpoint name */ @@ -1916,7 +2052,7 @@ static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count, continue; } - ret = gsi_channel_init_one(gsi, &data[i], command, prefetch); + ret = gsi_channel_init_one(gsi, &data[i], command); if (ret) goto err_unwind; } @@ -1952,19 +2088,20 @@ static void gsi_channel_exit(struct gsi *gsi) } /* Init function for GSI. GSI hardware does not need to be "ready" */ -int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, - u32 count, const struct ipa_gsi_endpoint_data *data, - bool modem_alloc) +int gsi_init(struct gsi *gsi, struct platform_device *pdev, + enum ipa_version version, u32 count, + const struct ipa_gsi_endpoint_data *data) { struct device *dev = &pdev->dev; struct resource *res; resource_size_t size; - unsigned int irq; + u32 adjust; int ret; gsi_validate_build(); gsi->dev = dev; + gsi->version = version; /* The GSI layer performs NAPI on all endpoints. NAPI requires a * network device structure, but the GSI layer does not have one, @@ -1972,55 +2109,53 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, */ init_dummy_netdev(&gsi->dummy_dev); - ret = platform_get_irq_byname(pdev, "gsi"); - if (ret <= 0) { - dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); - return ret ? : -EINVAL; - } - irq = ret; - - ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); - if (ret) { - dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); - return ret; - } - gsi->irq = irq; - /* Get GSI memory range and map it */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); if (!res) { dev_err(dev, "DT error getting \"gsi\" memory property\n"); - ret = -ENODEV; - goto err_free_irq; + return -ENODEV; } size = resource_size(res); if (res->start > U32_MAX || size > U32_MAX - res->start) { dev_err(dev, "DT memory resource \"gsi\" out of range\n"); - ret = -EINVAL; - goto err_free_irq; + return -EINVAL; + } + + /* Make sure we can make our pointer adjustment if necessary */ + adjust = gsi->version < IPA_VERSION_4_5 ? 
0 : GSI_EE_REG_ADJUST; + if (res->start < adjust) { + dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n", + adjust); + return -EINVAL; } gsi->virt = ioremap(res->start, size); if (!gsi->virt) { dev_err(dev, "unable to remap \"gsi\" memory\n"); - ret = -ENOMEM; - goto err_free_irq; + return -ENOMEM; } + /* Adjust register range pointer downward for newer IPA versions */ + gsi->virt -= adjust; - ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc); + init_completion(&gsi->completion); + + ret = gsi_irq_init(gsi, pdev); if (ret) goto err_iounmap; + ret = gsi_channel_init(gsi, count, data); + if (ret) + goto err_irq_exit; + mutex_init(&gsi->mutex); - init_completion(&gsi->completion); return 0; +err_irq_exit: + gsi_irq_exit(gsi); err_iounmap: iounmap(gsi->virt); -err_free_irq: - free_irq(gsi->irq, gsi); return ret; } @@ -2030,7 +2165,7 @@ void gsi_exit(struct gsi *gsi) { mutex_destroy(&gsi->mutex); gsi_channel_exit(gsi); - free_irq(gsi->irq, gsi); + gsi_irq_exit(gsi); iounmap(gsi->virt); } diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 3f9f29d531c4..96c9aed397aa 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -13,6 +13,8 @@ #include <linux/platform_device.h> #include <linux/netdevice.h> +#include "ipa_version.h" + /* Maximum number of channels and event rings supported by the driver */ #define GSI_CHANNEL_COUNT_MAX 17 #define GSI_EVT_RING_COUNT_MAX 13 @@ -31,10 +33,10 @@ struct ipa_gsi_endpoint_data; /* Execution environment IDs */ enum gsi_ee_id { - GSI_EE_AP = 0, - GSI_EE_MODEM = 1, - GSI_EE_UC = 2, - GSI_EE_TZ = 3, + GSI_EE_AP = 0x0, + GSI_EE_MODEM = 0x1, + GSI_EE_UC = 0x2, + GSI_EE_TZ = 0x3, }; struct gsi_ring { @@ -94,12 +96,12 @@ struct gsi_trans_info { /* Hardware values signifying the state of a channel */ enum gsi_channel_state { - GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0, - GSI_CHANNEL_STATE_ALLOCATED = 0x1, - GSI_CHANNEL_STATE_STARTED = 0x2, - GSI_CHANNEL_STATE_STOPPED = 0x3, - GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4, - GSI_CHANNEL_STATE_ERROR = 0xf, + GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0, + GSI_CHANNEL_STATE_ALLOCATED = 0x1, + GSI_CHANNEL_STATE_STARTED = 0x2, + GSI_CHANNEL_STATE_STOPPED = 0x3, + GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4, + GSI_CHANNEL_STATE_ERROR = 0xf, }; /* We only care about channels between IPA and AP */ @@ -107,7 +109,6 @@ struct gsi_channel { struct gsi *gsi; bool toward_ipa; bool command; /* AP command TX channel or not */ - bool use_prefetch; /* use prefetch (else escape buf) */ u8 tlv_count; /* # entries in TLV FIFO */ u16 tre_count; @@ -147,6 +148,7 @@ struct gsi_evt_ring { struct gsi { struct device *dev; /* Same as IPA device */ + enum ipa_version version; struct net_device dummy_dev; /* needed for NAPI */ void __iomem *virt; u32 irq; @@ -154,24 +156,25 @@ struct gsi { u32 evt_ring_count; struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX]; struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX]; - u32 event_bitmap; - u32 event_enable_bitmap; - u32 modem_channel_bitmap; + u32 event_bitmap; /* allocated event rings */ + u32 modem_channel_bitmap; /* modem channels to allocate */ + u32 type_enabled_bitmap; /* GSI IRQ types enabled */ + u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */ struct completion completion; /* for global EE commands */ + int result; /* Negative errno (generic commands) */ struct mutex mutex; /* protects commands, programming */ }; /** * gsi_setup() - Set up the GSI subsystem * @gsi: Address of GSI structure embedded in an IPA structure - * @legacy: Set up for legacy hardware * * 
Return: 0 if successful, or a negative error code * * Performs initialization that must wait until the GSI hardware is * ready (including firmware loaded). */ -int gsi_setup(struct gsi *gsi, bool legacy); +int gsi_setup(struct gsi *gsi); /** * gsi_teardown() - Tear down GSI subsystem @@ -219,15 +222,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id); * gsi_channel_reset() - Reset an allocated GSI channel * @gsi: GSI pointer * @channel_id: Channel to be reset - * @legacy: Legacy behavior + * @doorbell: Whether to (possibly) enable the doorbell engine * - * Reset a channel and reconfigure it. The @legacy flag indicates - * that some steps should be done differently for legacy hardware. + * Reset a channel and reconfigure it. The @doorbell flag indicates + * that the doorbell engine should be enabled if needed. * * GSI hardware relinquishes ownership of all pending receive buffer * transactions and they will complete with their cancelled flag set. */ -void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy); +void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell); int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop); int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start); @@ -236,15 +239,18 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start); * gsi_init() - Initialize the GSI subsystem * @gsi: Address of GSI structure embedded in an IPA structure * @pdev: IPA platform device + * @version: IPA hardware version (implies GSI version) + * @count: Number of entries in the configuration data array + * @data: Endpoint and channel configuration data * * Return: 0 if successful, or a negative error code * * Early stage initialization of the GSI subsystem, performing tasks * that can be done before the GSI hardware is ready to use. */ -int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, - u32 count, const struct ipa_gsi_endpoint_data *data, - bool modem_alloc); +int gsi_init(struct gsi *gsi, struct platform_device *pdev, + enum ipa_version version, u32 count, + const struct ipa_gsi_endpoint_data *data); /** * gsi_exit() - Exit the GSI subsystem diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index 8e0e9350c383..0e138bbd8205 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -38,6 +38,17 @@ * (though the actual limit is hardware-dependent). */ +/* GSI EE registers as a group are shifted downward by a fixed + * constant amount for IPA versions 4.5 and beyond. This applies + * to all GSI registers we use *except* the ones that disable + * inter-EE interrupts for channels and event channels. + * + * We handle this by adjusting the pointer to the mapped GSI memory + * region downward. Then in the one place we use them (gsi_irq_setup()) + * we undo that adjustment for the inter-EE interrupt registers. 
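+ * (gsi_init() performs that downward adjustment, after first checking that the "gsi" memory resource allows the subtraction.)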
+ */ +#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */ + #define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \ GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP) #define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \ @@ -66,12 +77,20 @@ #define CHTYPE_DIR_FMASK GENMASK(3, 3) #define EE_FMASK GENMASK(7, 4) #define CHID_FMASK GENMASK(12, 8) -/* The next field is present for GSI v2.0 and above */ +/* The next field is present for IPA v4.5 and above */ #define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13) #define ERINDEX_FMASK GENMASK(18, 14) #define CHSTATE_FMASK GENMASK(23, 20) #define ELEMENT_SIZE_FMASK GENMASK(31, 24) +/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */ +enum gsi_channel_type { + GSI_CHANNEL_TYPE_MHI = 0x0, + GSI_CHANNEL_TYPE_XHCI = 0x1, + GSI_CHANNEL_TYPE_GPI = 0x2, + GSI_CHANNEL_TYPE_XDCI = 0x3, +}; + #define GSI_CH_C_CNTXT_1_OFFSET(ch) \ GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP) #define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \ @@ -95,8 +114,18 @@ #define WRR_WEIGHT_FMASK GENMASK(3, 0) #define MAX_PREFETCH_FMASK GENMASK(8, 8) #define USE_DB_ENG_FMASK GENMASK(9, 9) -/* The next field is present for GSI v2.0 and above */ +/* The next field is only present for IPA v4.0, v4.1, and v4.2 */ #define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10) +/* The next two fields are present for IPA v4.5 and above */ +#define PREFETCH_MODE_FMASK GENMASK(13, 10) +#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16) +/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */ +enum gsi_prefetch_mode { + GSI_USE_PREFETCH_BUFS = 0x0, + GSI_ESCAPE_BUF_ONLY = 0x1, + GSI_SMART_PREFETCH = 0x2, + GSI_FREE_PREFETCH = 0x3, +}; #define GSI_CH_C_SCRATCH_0_OFFSET(ch) \ GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP) @@ -128,6 +157,7 @@ #define EV_INTYPE_FMASK GENMASK(16, 16) #define EV_CHSTATE_FMASK GENMASK(23, 20) #define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24) +/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */ #define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \ GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP) @@ -216,6 +246,15 @@ #define CH_CHID_FMASK GENMASK(7, 0) #define CH_OPCODE_FMASK GENMASK(31, 24) +/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */ +enum gsi_ch_cmd_opcode { + GSI_CH_ALLOCATE = 0x0, + GSI_CH_START = 0x1, + GSI_CH_STOP = 0x2, + GSI_CH_RESET = 0x9, + GSI_CH_DE_ALLOC = 0xa, +}; + #define GSI_EV_CH_CMD_OFFSET \ GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP) #define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \ @@ -223,6 +262,13 @@ #define EV_CHID_FMASK GENMASK(7, 0) #define EV_OPCODE_FMASK GENMASK(31, 24) +/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */ +enum gsi_evt_cmd_opcode { + GSI_EVT_ALLOCATE = 0x0, + GSI_EVT_RESET = 0x9, + GSI_EVT_DE_ALLOC = 0xa, +}; + #define GSI_GENERIC_CMD_OFFSET \ GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP) #define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \ @@ -231,29 +277,43 @@ #define GENERIC_CHID_FMASK GENMASK(9, 5) #define GENERIC_EE_FMASK GENMASK(13, 10) +/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */ +enum gsi_generic_cmd_opcode { + GSI_GENERIC_HALT_CHANNEL = 0x1, + GSI_GENERIC_ALLOCATE_CHANNEL = 0x2, +}; + #define GSI_GSI_HW_PARAM_2_OFFSET \ GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP) #define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \ (0x0001f040 + 0x4000 * (ee)) #define IRAM_SIZE_FMASK GENMASK(2, 0) -#define IRAM_SIZE_ONE_KB_FVAL 0 -#define IRAM_SIZE_TWO_KB_FVAL 1 -/* The next two values are available for GSI v2.0 and above */ -#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2 -#define IRAM_SIZE_THREE_KB_FVAL 3 #define 
NUM_CH_PER_EE_FMASK GENMASK(7, 3) #define NUM_EV_PER_EE_FMASK GENMASK(12, 8) #define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13) #define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14) -/* Fields below are present for GSI v2.0 and above */ +/* Fields below are present for IPA v4.0 and above */ #define GSI_USE_SDMA_FMASK GENMASK(15, 15) #define GSI_SDMA_N_INT_FMASK GENMASK(18, 16) #define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19) #define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27) -/* Fields below are present for GSI v2.2 and above */ +/* Fields below are present for IPA v4.2 and above */ #define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30) #define GSI_USE_INTER_EE_FMASK GENMASK(31, 31) +/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */ +enum gsi_iram_size { + IRAM_SIZE_ONE_KB = 0x0, + IRAM_SIZE_TWO_KB = 0x1, +/* The next two values are available for IPA v4.0 and above */ + IRAM_SIZE_TWO_N_HALF_KB = 0x2, + IRAM_SIZE_THREE_KB = 0x3, + /* The next two values are available for IPA v4.5 and above */ + IRAM_SIZE_THREE_N_HALF_KB = 0x4, + IRAM_SIZE_FOUR_KB = 0x5, +}; + +/* IRQ condition for each type is cleared by writing type-specific register */ #define GSI_CNTXT_TYPE_IRQ_OFFSET \ GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP) #define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \ @@ -262,15 +322,17 @@ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP) #define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \ (0x0001f088 + 0x4000 * (ee)) -/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */ -#define CH_CTRL_FMASK GENMASK(0, 0) -#define EV_CTRL_FMASK GENMASK(1, 1) -#define GLOB_EE_FMASK GENMASK(2, 2) -#define IEOB_FMASK GENMASK(3, 3) -#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4) -#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5) -#define GENERAL_FMASK GENMASK(6, 6) -#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0) + +/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */ +enum gsi_irq_type_id { + GSI_CH_CTRL = 0x0, /* channel allocation, etc. */ + GSI_EV_CTRL = 0x1, /* event ring allocation, etc. 
*/ + GSI_GLOB_EE = 0x2, /* global/general event */ + GSI_IEOB = 0x3, /* TRE completion */ + GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */ + GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */ + GSI_GENERAL = 0x6, /* general-purpose event */ +}; #define GSI_CNTXT_SRC_CH_IRQ_OFFSET \ GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP) @@ -329,12 +391,13 @@ GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP) #define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \ (0x0001f110 + 0x4000 * (ee)) -/* The masks below are used for the general IRQ STTS, EN, and CLR registers */ -#define ERROR_INT_FMASK GENMASK(0, 0) -#define GP_INT1_FMASK GENMASK(1, 1) -#define GP_INT2_FMASK GENMASK(2, 2) -#define GP_INT3_FMASK GENMASK(3, 3) -#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0) +/* Values here are bit positions in the GLOB_IRQ_* registers */ +enum gsi_global_irq_id { + ERROR_INT = 0x0, + GP_INT1 = 0x1, + GP_INT2 = 0x2, + GP_INT3 = 0x3, +}; #define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \ GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP) @@ -348,12 +411,13 @@ GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP) #define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \ (0x0001f128 + 0x4000 * (ee)) -/* The masks below are used for the general IRQ STTS, EN, and CLR registers */ -#define BREAK_POINT_FMASK GENMASK(0, 0) -#define BUS_ERROR_FMASK GENMASK(1, 1) -#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2) -#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3) -#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0) +/* Values here are bit positions in the (general) GSI_IRQ_* registers */ +enum gsi_general_id { + BREAK_POINT = 0x0, + BUS_ERROR = 0x1, + CMD_FIFO_OVRFLOW = 0x2, + MCS_STACK_OVRFLOW = 0x3, +}; #define GSI_CNTXT_INTSET_OFFSET \ GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP) @@ -373,6 +437,25 @@ #define ERR_TYPE_FMASK GENMASK(27, 24) #define ERR_EE_FMASK GENMASK(31, 28) +/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */ +enum gsi_err_code { + GSI_INVALID_TRE = 0x1, + GSI_OUT_OF_BUFFERS = 0x2, + GSI_OUT_OF_RESOURCES = 0x3, + GSI_UNSUPPORTED_INTER_EE_OP = 0x4, + GSI_EVT_RING_EMPTY = 0x5, + GSI_NON_ALLOCATED_EVT_ACCESS = 0x6, + /* 7 is not assigned */ + GSI_HWO_1 = 0x8, +}; + +/** enum gsi_err_type - ERR_TYPE field values in EE_ERR_LOG */ +enum gsi_err_type { + GSI_ERR_TYPE_GLOB = 0x1, + GSI_ERR_TYPE_CHAN = 0x2, + GSI_ERR_TYPE_EVT = 0x3, +}; + #define GSI_ERROR_LOG_CLR_OFFSET \ GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP) #define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \ @@ -384,10 +467,18 @@ (0x0001f400 + 0x4000 * (ee)) #define INTER_EE_RESULT_FMASK GENMASK(2, 0) #define GENERIC_EE_RESULT_FMASK GENMASK(7, 5) -#define GENERIC_EE_SUCCESS_FVAL 1 -#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3 -#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5 -#define GENERIC_EE_NO_RESOURCES_FVAL 7 + +/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */ +enum gsi_generic_ee_result { + GENERIC_EE_SUCCESS = 0x1, + GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2, + GENERIC_EE_INCORRECT_DIRECTION = 0x3, + GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4, + GENERIC_EE_INCORRECT_CHANNEL = 0x5, + GENERIC_EE_RETRY = 0x6, + GENERIC_EE_NO_RESOURCES = 0x7, +}; + #define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */ #define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24) diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c index a2c0fde05819..9dcf16f399b7 100644 --- a/drivers/net/ipa/ipa_clock.c +++ b/drivers/net/ipa/ipa_clock.c @@ -13,6 +13,7 @@ #include "ipa.h" #include "ipa_clock.h" #include "ipa_modem.h" +#include "ipa_data.h" /** * DOC: IPA Clocking @@ 
-29,18 +30,6 @@ * An IPA clock reference must be held for any access to IPA hardware. */ -#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */ - -/* Interconnect path bandwidths (each times 1000 bytes per second) */ -#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */ -#define IPA_MEMORY_PEAK (600 * 1000) - -#define IPA_IMEM_AVG (80 * 1000) -#define IPA_IMEM_PEAK (350 * 1000) - -#define IPA_CONFIG_AVG (40 * 1000) -#define IPA_CONFIG_PEAK (40 * 1000) - /** * struct ipa_clock - IPA clocking information * @count: Clocking reference count @@ -49,6 +38,7 @@ * @memory_path: Memory interconnect * @imem_path: Internal memory interconnect * @config_path: Configuration space interconnect + * @interconnect_data: Interconnect configuration data */ struct ipa_clock { refcount_t count; @@ -57,6 +47,7 @@ struct ipa_clock { struct icc_path *memory_path; struct icc_path *imem_path; struct icc_path *config_path; + const struct ipa_interconnect_data *interconnect_data; }; static struct icc_path * @@ -113,18 +104,25 @@ static void ipa_interconnect_exit(struct ipa_clock *clock) /* Currently we only use one bandwidth level, so just "enable" interconnects */ static int ipa_interconnect_enable(struct ipa *ipa) { + const struct ipa_interconnect_data *data; struct ipa_clock *clock = ipa->clock; int ret; - ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK); + data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY]; + ret = icc_set_bw(clock->memory_path, data->average_rate, + data->peak_rate); if (ret) return ret; - ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK); + data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM]; + ret = icc_set_bw(clock->imem_path, data->average_rate, + data->peak_rate); if (ret) goto err_memory_path_disable; - ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK); + data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG]; + ret = icc_set_bw(clock->config_path, data->average_rate, + data->peak_rate); if (ret) goto err_imem_path_disable; @@ -141,6 +139,7 @@ err_memory_path_disable: /* To disable an interconnect, we just set its bandwidth to 0 */ static int ipa_interconnect_disable(struct ipa *ipa) { + const struct ipa_interconnect_data *data; struct ipa_clock *clock = ipa->clock; int ret; @@ -159,9 +158,13 @@ static int ipa_interconnect_disable(struct ipa *ipa) return 0; err_imem_path_reenable: - (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK); + data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM]; + (void)icc_set_bw(clock->imem_path, data->average_rate, + data->peak_rate); err_memory_path_reenable: - (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK); + data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY]; + (void)icc_set_bw(clock->memory_path, data->average_rate, + data->peak_rate); return ret; } @@ -257,7 +260,8 @@ u32 ipa_clock_rate(struct ipa *ipa) } /* Initialize IPA clocking */ -struct ipa_clock *ipa_clock_init(struct device *dev) +struct ipa_clock * +ipa_clock_init(struct device *dev, const struct ipa_clock_data *data) { struct ipa_clock *clock; struct clk *clk; @@ -269,10 +273,10 @@ struct ipa_clock *ipa_clock_init(struct device *dev) return ERR_CAST(clk); } - ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE); + ret = clk_set_rate(clk, data->core_clock_rate); if (ret) { - dev_err(dev, "error %d setting core clock rate to %lu\n", - ret, IPA_CORE_CLOCK_RATE); + dev_err(dev, "error %d setting core clock rate to %u\n", + ret, data->core_clock_rate); goto err_clk_put; } @@ -282,6 +286,7 @@ 
struct ipa_clock *ipa_clock_init(struct device *dev) goto err_clk_put; } clock->core = clk; + clock->interconnect_data = data->interconnect; ret = ipa_interconnect_init(clock, dev); if (ret) diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h index 1d70f1de3875..1fe634760e59 100644 --- a/drivers/net/ipa/ipa_clock.h +++ b/drivers/net/ipa/ipa_clock.h @@ -9,6 +9,7 @@ struct device; struct ipa; +struct ipa_clock_data; /** * ipa_clock_rate() - Return the current IPA core clock rate @@ -21,10 +22,12 @@ u32 ipa_clock_rate(struct ipa *ipa); /** * ipa_clock_init() - Initialize IPA clocking * @dev: IPA device + * @data: Clock configuration data * * Return: A pointer to an ipa_clock structure, or a pointer-coded error */ -struct ipa_clock *ipa_clock_init(struct device *dev); +struct ipa_clock *ipa_clock_init(struct device *dev, + const struct ipa_clock_data *data); /** * ipa_clock_exit() - Inverse of ipa_clock_init() diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index d92dd3f09b73..002e51448510 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -38,9 +38,9 @@ /* Some commands can wait until indicated pipeline stages are clear */ enum pipeline_clear_options { - pipeline_clear_hps = 0, - pipeline_clear_src_grp = 1, - pipeline_clear_full = 2, + pipeline_clear_hps = 0x0, + pipeline_clear_src_grp = 0x1, + pipeline_clear_full = 0x2, }; /* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */ diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index f7e6f87facf7..4ed09c486abc 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -27,16 +27,16 @@ struct gsi_channel; * a request is *not* an immediate command. */ enum ipa_cmd_opcode { - IPA_CMD_NONE = 0, - IPA_CMD_IP_V4_FILTER_INIT = 3, - IPA_CMD_IP_V6_FILTER_INIT = 4, - IPA_CMD_IP_V4_ROUTING_INIT = 7, - IPA_CMD_IP_V6_ROUTING_INIT = 8, - IPA_CMD_HDR_INIT_LOCAL = 9, - IPA_CMD_REGISTER_WRITE = 12, - IPA_CMD_IP_PACKET_INIT = 16, - IPA_CMD_DMA_SHARED_MEM = 19, - IPA_CMD_IP_PACKET_TAG_STATUS = 20, + IPA_CMD_NONE = 0x0, + IPA_CMD_IP_V4_FILTER_INIT = 0x3, + IPA_CMD_IP_V6_FILTER_INIT = 0x4, + IPA_CMD_IP_V4_ROUTING_INIT = 0x7, + IPA_CMD_IP_V6_ROUTING_INIT = 0x8, + IPA_CMD_HDR_INIT_LOCAL = 0x9, + IPA_CMD_REGISTER_WRITE = 0xc, + IPA_CMD_IP_PACKET_INIT = 0x10, + IPA_CMD_DMA_SHARED_MEM = 0x13, + IPA_CMD_IP_PACKET_TAG_STATUS = 0x14, }; /** @@ -50,7 +50,6 @@ struct ipa_cmd_info { enum dma_data_direction direction; }; - #ifdef IPA_VALIDATE /** diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c index d4c2bc7ad24b..5cc0ed77edb9 100644 --- a/drivers/net/ipa/ipa_data-sc7180.c +++ b/drivers/net/ipa/ipa_data-sc7180.c @@ -24,6 +24,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_DMA_ONLY, .config = { + .resource_group = 0, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, }, @@ -42,6 +43,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { + .resource_group = 0, .aggregation = true, .status_enable = true, .rx = { @@ -65,6 +67,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .seq_type = IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP, .config = { + .resource_group = 0, .checksum = true, .qmap = true, .status_enable = true, @@ -88,6 +91,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { + .resource_group = 0, .checksum = true, 
.qmap = true, .aggregation = true, @@ -305,6 +309,26 @@ static struct ipa_mem_data ipa_mem_data = { .smem_size = 0x00002000, }; +static struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 100 * 1000 * 1000, /* Hz */ + /* Interconnect rates are in 1000 byte/second units */ + .interconnect = { + [IPA_INTERCONNECT_MEMORY] = { + .peak_rate = 465000, /* 465 MBps */ + .average_rate = 80000, /* 80 MBps */ + }, + /* Average rate is unused for the next two interconnects */ + [IPA_INTERCONNECT_IMEM] = { + .peak_rate = 68570, /* 68.570 MBps */ + .average_rate = 0, /* unused */ + }, + [IPA_INTERCONNECT_CONFIG] = { + .peak_rate = 30000, /* 30 MBps */ + .average_rate = 0, /* unused */ + }, + }, +}; + /* Configuration data for the SC7180 SoC. */ const struct ipa_data ipa_data_sc7180 = { .version = IPA_VERSION_4_2, @@ -312,4 +336,5 @@ const struct ipa_data ipa_data_sc7180 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, }; diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c index de2768d71ab5..f8fee8d3ca42 100644 --- a/drivers/net/ipa/ipa_data-sdm845.c +++ b/drivers/net/ipa/ipa_data-sdm845.c @@ -26,6 +26,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_DMA_ONLY, .config = { + .resource_group = 1, .dma_mode = true, .dma_endpoint = IPA_ENDPOINT_AP_LAN_RX, }, @@ -44,6 +45,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { + .resource_group = 1, .aggregation = true, .status_enable = true, .rx = { @@ -67,6 +69,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .seq_type = IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP, .config = { + .resource_group = 1, .checksum = true, .qmap = true, .status_enable = true, @@ -90,6 +93,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .seq_type = IPA_SEQ_INVALID, .config = { + .resource_group = 1, .checksum = true, .qmap = true, .aggregation = true, @@ -146,11 +150,11 @@ static const struct ipa_resource_src ipa_resource_src[] = { .type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS, .limits[0] = { .min = 1, - .max = 63, + .max = 255, }, .limits[1] = { .min = 1, - .max = 63, + .max = 255, }, }, { @@ -325,6 +329,26 @@ static struct ipa_mem_data ipa_mem_data = { .smem_size = 0x00002000, }; +static struct ipa_clock_data ipa_clock_data = { + .core_clock_rate = 75 * 1000 * 1000, /* Hz */ + /* Interconnect rates are in 1000 byte/second units */ + .interconnect = { + [IPA_INTERCONNECT_MEMORY] = { + .peak_rate = 600000, /* 600 MBps */ + .average_rate = 80000, /* 80 MBps */ + }, + /* Average rate is unused for the next two interconnects */ + [IPA_INTERCONNECT_IMEM] = { + .peak_rate = 350000, /* 350 MBps */ + .average_rate = 0, /* unused */ + }, + [IPA_INTERCONNECT_CONFIG] = { + .peak_rate = 40000, /* 40 MBps */ + .average_rate = 0, /* unused */ + }, + }, +}; + /* Configuration data for the SDM845 SoC. 
*/ const struct ipa_data ipa_data_sdm845 = { .version = IPA_VERSION_3_5_1, @@ -332,4 +356,5 @@ const struct ipa_data ipa_data_sdm845 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, + .clock_data = &ipa_clock_data, }; diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 7fc1058a5ca9..0ed5ffe2b8da 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -45,10 +45,10 @@ * the IPA endpoint. */ -/* The maximum value returned by ipa_resource_group_count() */ -#define IPA_RESOURCE_GROUP_COUNT 4 +/* The maximum value returned by ipa_resource_group_{src,dst}_count() */ +#define IPA_RESOURCE_GROUP_SRC_MAX 5 +#define IPA_RESOURCE_GROUP_DST_MAX 5 -/** enum ipa_resource_type_src - source resource types */ /** * struct gsi_channel_data - GSI channel configuration data * @tre_count: number of TREs in the channel ring @@ -109,6 +109,7 @@ struct ipa_endpoint_rx_data { /** * struct ipa_endpoint_config_data - IPA endpoint hardware configuration + * @resource_group: resource group to assign endpoint to * @checksum: whether checksum offload is enabled * @qmap: whether endpoint uses QMAP protocol * @aggregation: whether endpoint supports aggregation @@ -119,6 +120,7 @@ struct ipa_endpoint_rx_data { * @rx: RX-specific endpoint information (see above) */ struct ipa_endpoint_config_data { + u32 resource_group; bool checksum; bool qmap; bool aggregation; @@ -206,7 +208,7 @@ struct ipa_resource_limits { */ struct ipa_resource_src { enum ipa_resource_type_src type; - struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT]; + struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX]; }; /** @@ -216,7 +218,7 @@ struct ipa_resource_src { */ struct ipa_resource_dst { enum ipa_resource_type_dst type; - struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT]; + struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX]; }; /** @@ -239,7 +241,7 @@ struct ipa_resource_data { }; /** - * struct ipa_mem - description of IPA memory regions + * struct ipa_mem_data - description of IPA memory regions * @local_count: number of regions defined in the local[] array * @local: array of IPA-local memory region descriptors * @imem_addr: physical address of IPA region within IMEM @@ -256,6 +258,34 @@ struct ipa_mem_data { u32 smem_size; }; +/** enum ipa_interconnect_id - IPA interconnect identifier */ +enum ipa_interconnect_id { + IPA_INTERCONNECT_MEMORY, + IPA_INTERCONNECT_IMEM, + IPA_INTERCONNECT_CONFIG, + IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */ +}; + +/** + * struct ipa_interconnect_data - description of IPA interconnect rates + * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units) + * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units) + */ +struct ipa_interconnect_data { + u32 peak_rate; + u32 average_rate; +}; + +/** + * struct ipa_clock_data - description of IPA clock and interconnect rates + * @core_clock_rate: Core clock rate (Hz) + * @interconnect: Array of interconnect bandwidth parameters + */ +struct ipa_clock_data { + u32 core_clock_rate; + struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT]; +}; + /** * struct ipa_data - combined IPA/GSI configuration data * @version: IPA hardware version @@ -271,6 +301,7 @@ struct ipa_data { const struct ipa_gsi_endpoint_data *endpoint_data; const struct ipa_resource_data *resource_data; const struct ipa_mem_data *mem_data; + const struct ipa_clock_data *clock_data; }; extern const struct ipa_data ipa_data_sdm845; 
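For reference, a minimal self-contained sketch of the data-driven approach introduced above. The types are re-declared locally and icc_set_bw() is replaced by a hypothetical stub (icc_set_bw_stub) so the example builds standalone; none of this is part of the patch:

#include <stdio.h>

/* Local re-declarations mirroring ipa_data.h above (illustration only) */
enum ipa_interconnect_id {
	IPA_INTERCONNECT_MEMORY,
	IPA_INTERCONNECT_IMEM,
	IPA_INTERCONNECT_CONFIG,
	IPA_INTERCONNECT_COUNT,		/* Last; not an interconnect */
};

struct ipa_interconnect_data {
	unsigned int peak_rate;		/* in 1000 byte/second units */
	unsigned int average_rate;
};

/* Rates taken from the SDM845 table in ipa_data-sdm845.c above */
static const struct ipa_interconnect_data sdm845[IPA_INTERCONNECT_COUNT] = {
	[IPA_INTERCONNECT_MEMORY] = { .peak_rate = 600000, .average_rate = 80000 },
	[IPA_INTERCONNECT_IMEM]   = { .peak_rate = 350000, .average_rate = 0 },
	[IPA_INTERCONNECT_CONFIG] = { .peak_rate = 40000,  .average_rate = 0 },
};

/* Hypothetical stand-in for the interconnect framework call */
static int icc_set_bw_stub(const char *path, unsigned int avg, unsigned int peak)
{
	printf("%-6s: avg=%u peak=%u (1000 byte/sec units)\n", path, avg, peak);
	return 0;
}

int main(void)
{
	static const char *const path[IPA_INTERCONNECT_COUNT] = {
		"memory", "imem", "config",
	};
	unsigned int i;

	/* Each path is programmed with its own per-SoC rates; nothing
	 * needs to be hard-coded in the driver any more.
	 */
	for (i = 0; i < IPA_INTERCONNECT_COUNT; i++)
		icc_set_bw_stub(path[i], sdm845[i].average_rate,
				sdm845[i].peak_rate);

	return 0;
}

Supporting a new SoC then amounts to adding another table like the SC7180 and SDM845 ones above.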
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index b40b711cf4bd..9f4be9812a1f 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -37,7 +37,7 @@ #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 -#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */ +#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ /** enum ipa_status_opcode - status element opcode hardware values */ enum ipa_status_opcode { @@ -74,31 +74,6 @@ struct ipa_status { #ifdef IPA_VALIDATE -static void ipa_endpoint_validate_build(void) -{ - /* The aggregation byte limit defines the point at which an - * aggregation window will close. It is programmed into the - * IPA hardware as a number of KB. We don't use "hard byte - * limit" aggregation, which means that we need to supply - * enough space in a receive buffer to hold a complete MTU - * plus normal skb overhead *after* that aggregation byte - * limit has been crossed. - * - * This check just ensures we don't define a receive buffer - * size that would exceed what we can represent in the field - * that is used to program its size. - */ - BUILD_BUG_ON(IPA_RX_BUFFER_SIZE > - field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K + - IPA_MTU + IPA_RX_BUFFER_OVERHEAD); - - /* I honestly don't know where this requirement comes from. But - * it holds, and if we someday need to loosen the constraint we - * can try to track it down. - */ - BUILD_BUG_ON(sizeof(struct ipa_status) % 4); -} - static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *all_data, const struct ipa_gsi_endpoint_data *data) @@ -180,14 +155,24 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return true; } +static u32 aggr_byte_limit_max(enum ipa_version version) +{ + if (version < IPA_VERSION_4_5) + return field_max(aggr_byte_limit_fmask(true)); + + return field_max(aggr_byte_limit_fmask(false)); +} + static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *data) { const struct ipa_gsi_endpoint_data *dp = data; struct device *dev = &ipa->pdev->dev; enum ipa_endpoint_name name; + u32 limit; - ipa_endpoint_validate_build(); + /* Not sure where this constraint comes from... */ + BUILD_BUG_ON(sizeof(struct ipa_status) % 4); if (count > IPA_ENDPOINT_COUNT) { dev_err(dev, "too many endpoints specified (%u > %u)\n", @@ -195,6 +180,26 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, return false; } + /* The aggregation byte limit defines the point at which an + * aggregation window will close. It is programmed into the + * IPA hardware as a number of KB. We don't use "hard byte + * limit" aggregation, which means that we need to supply + * enough space in a receive buffer to hold a complete MTU + * plus normal skb overhead *after* that aggregation byte + * limit has been crossed. + * + * This check ensures we don't define a receive buffer size + * that would exceed what we can represent in the field that + * is used to program its size. 
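+ * (The maximum representable value is version-dependent; see aggr_byte_limit_max() above, which selects the field width for the hardware in use.)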
+ */ + limit = aggr_byte_limit_max(ipa->version) * SZ_1K; + limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; + if (limit < IPA_RX_BUFFER_SIZE) { + dev_err(dev, "buffer size too big for aggregation (%u > %u)\n", + IPA_RX_BUFFER_SIZE, limit); + return false; + } + /* Make sure needed endpoints have defined data */ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) { dev_err(dev, "command TX endpoint not defined\n"); @@ -485,28 +490,34 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); + struct ipa *ipa = endpoint->ipa; u32 val = 0; if (endpoint->data->qmap) { size_t header_size = sizeof(struct rmnet_map_header); + enum ipa_version version = ipa->version; /* We might supply a checksum header after the QMAP header */ if (endpoint->toward_ipa && endpoint->data->checksum) header_size += sizeof(struct rmnet_map_ul_csum_header); - val |= u32_encode_bits(header_size, HDR_LEN_FMASK); + val |= ipa_header_size_encoded(version, header_size); /* Define how to fill fields in a received QMAP header */ if (!endpoint->toward_ipa) { - u32 off; /* Field offset within header */ + u32 offset; /* Field offset within header */ /* Where IPA will write the metadata value */ - off = offsetof(struct rmnet_map_header, mux_id); - val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK); + offset = offsetof(struct rmnet_map_header, mux_id); + val |= ipa_metadata_offset_encoded(version, offset); /* Where IPA will write the length */ - off = offsetof(struct rmnet_map_header, pkt_len); + offset = offsetof(struct rmnet_map_header, pkt_len); + /* Upper bits are stored in HDR_EXT with IPA v4.5 */ + if (version == IPA_VERSION_4_5) + offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); + val |= HDR_OFST_PKT_SIZE_VALID_FMASK; - val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK); + val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK); } /* For QMAP TX, metadata offset is 0 (modem assumes this) */ val |= HDR_OFST_METADATA_VALID_FMASK; @@ -514,16 +525,17 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ /* HDR_A5_MUX is 0 */ /* HDR_LEN_INC_DEAGG_HDR is 0 */ - /* HDR_METADATA_REG_VALID is 0 (TX only) */ + /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ } - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + offset); } static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); u32 pad_align = endpoint->data->rx.pad_align; + struct ipa *ipa = endpoint->ipa; u32 val = 0; val |= HDR_ENDIANNESS_FMASK; /* big endian */ @@ -545,10 +557,24 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); - iowrite32(val, endpoint->ipa->reg_virt + offset); + /* IPA v4.5 adds some most-significant bits to a few fields, + * two of which are defined in the HDR (not HDR_EXT) register. 
+ */ + if (ipa->version == IPA_VERSION_4_5) { + /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ + if (endpoint->data->qmap && !endpoint->toward_ipa) { + u32 offset; + + offset = offsetof(struct rmnet_map_header, pkt_len); + offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK); + val |= u32_encode_bits(offset, + HDR_OFST_PKT_SIZE_MSB_FMASK); + /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */ + } + } + iowrite32(val, ipa->reg_virt + offset); } - static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) { u32 endpoint_id = endpoint->endpoint_id; @@ -603,29 +629,84 @@ static u32 ipa_aggr_size_kb(u32 rx_buffer_size) return rx_buffer_size / SZ_1K; } +/* Encoded values for AGGR endpoint register fields */ +static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) +{ + if (version < IPA_VERSION_4_5) + return u32_encode_bits(limit, aggr_byte_limit_fmask(true)); + + return u32_encode_bits(limit, aggr_byte_limit_fmask(false)); +} + +/* Encode the aggregation timer limit (microseconds) based on IPA version */ +static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) +{ + u32 gran_sel; + u32 fmask; + u32 val; + + if (version < IPA_VERSION_4_5) { + /* We set aggregation granularity in ipa_hardware_config() */ + limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); + + return u32_encode_bits(limit, aggr_time_limit_fmask(true)); + } + + /* IPA v4.5 expresses the time limit using Qtime. The AP has + * pulse generators 0 and 1 available, which were configured + * in ipa_qtime_config() to have granularity 100 usec and + * 1 msec, respectively. Use pulse generator 0 if possible, + * otherwise fall back to pulse generator 1. + */ + fmask = aggr_time_limit_fmask(false); + val = DIV_ROUND_CLOSEST(limit, 100); + if (val > field_max(fmask)) { + /* Have to use pulse generator 1 (millisecond granularity) */ + gran_sel = AGGR_GRAN_SEL_FMASK; + val = DIV_ROUND_CLOSEST(limit, 1000); + } else { + /* We can use pulse generator 0 (100 usec granularity) */ + gran_sel = 0; + } + + return gran_sel | u32_encode_bits(val, fmask); +} + +static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled) +{ + u32 val = enabled ? 
1 : 0; + + if (version < IPA_VERSION_4_5) + return u32_encode_bits(val, aggr_sw_eof_active_fmask(true)); + + return u32_encode_bits(val, aggr_sw_eof_active_fmask(false)); +} + static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); + enum ipa_version version = endpoint->ipa->version; u32 val = 0; if (endpoint->data->aggregation) { if (!endpoint->toward_ipa) { + bool close_eof; u32 limit; val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); - val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK); + val |= aggr_byte_limit_encoded(version, limit); - limit = IPA_AGGR_TIME_LIMIT_DEFAULT; - limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); - val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK); + limit = IPA_AGGR_TIME_LIMIT; + val |= aggr_time_limit_encoded(version, limit); /* AGGR_PKT_LIMIT is 0 (unlimited) */ - if (endpoint->data->rx.aggr_close_eof) - val |= AGGR_SW_EOF_ACTIVE_FMASK; + close_eof = endpoint->data->rx.aggr_close_eof; + val |= aggr_sw_eof_active_encoded(version, close_eof); + /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ } else { val |= u32_encode_bits(IPA_ENABLE_DEAGGR, @@ -634,6 +715,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) /* other fields ignored */ } /* AGGR_FORCE_CLOSE is 0 */ + /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ } else { val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK); /* other fields ignored */ @@ -642,12 +724,45 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } -/* The head-of-line blocking timer is defined as a tick count, where each - * tick represents 128 cycles of the IPA core clock. Return the value - * that should be written to that register that represents the timeout - * period provided. +/* Return the Qtime-based head-of-line blocking timer value that + * represents the given number of microseconds. The result + * includes both the timer value and the selected timer granularity. */ -static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) +static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) +{ + u32 gran_sel; + u32 val; + + /* IPA v4.5 expresses time limits using Qtime. The AP has + * pulse generators 0 and 1 available, which were configured + * in ipa_qtime_config() to have granularity 100 usec and + * 1 msec, respectively. Use pulse generator 0 if possible, + * otherwise fall back to pulse generator 1. + */ + val = DIV_ROUND_CLOSEST(microseconds, 100); + if (val > field_max(TIME_LIMIT_FMASK)) { + /* Have to use pulse generator 1 (millisecond granularity) */ + gran_sel = GRAN_SEL_FMASK; + val = DIV_ROUND_CLOSEST(microseconds, 1000); + } else { + /* We can use pulse generator 0 (100 usec granularity) */ + gran_sel = 0; + } + + return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK); +} + +/* The head-of-line blocking timer is defined as a tick count. For + * IPA version 4.5 the tick count is based on the Qtimer, which is + * derived from the 19.2 MHz SoC XO clock. For older IPA versions + * each tick represents 128 cycles of the IPA core clock. + * + * Return the encoded value that should be written to that register + * that represents the timeout period provided. For IPA v4.2 this + * encodes a base and scale value, while for earlier versions the + * value is a simple tick count. 
+ */ +static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) { u32 width; u32 scale; @@ -659,14 +774,17 @@ static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) if (!microseconds) return 0; /* Nothing to compute if timer period is 0 */ + if (ipa->version == IPA_VERSION_4_5) + return hol_block_timer_qtime_val(ipa, microseconds); + /* Use 64 bit arithmetic to avoid overflow... */ rate = ipa_clock_rate(ipa); ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); /* ...but we still need to fit into a 32-bit register */ WARN_ON(ticks > U32_MAX); - /* IPA v3.5.1 just records the tick count */ - if (ipa->version == IPA_VERSION_3_5_1) + /* IPA v3.5.1 through v4.1 just record the tick count */ + if (ipa->version < IPA_VERSION_4_2) return (u32)ticks; /* For IPA v4.2, the tick count is represented by base and @@ -704,7 +822,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, u32 val; offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); - val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); + val = hol_block_timer_val(ipa, microseconds); iowrite32(val, ipa->reg_virt + offset); } @@ -751,6 +869,16 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } +static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) +{ + u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); + struct ipa *ipa = endpoint->ipa; + u32 val; + + val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); + iowrite32(val, ipa->reg_virt + offset); +} + static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) { u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); @@ -834,9 +962,10 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint) val |= u32_encode_bits(status_endpoint_id, STATUS_ENDP_FMASK); } - /* STATUS_LOCATION is 0 (status element precedes packet) */ - /* The next field is present for IPA v4.0 and above */ - /* STATUS_PKT_SUPPRESS_FMASK is 0 */ + /* STATUS_LOCATION is 0, meaning status element precedes + * packet (not present for IPA v4.5) + */ + /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */ } iowrite32(val, ipa->reg_virt + offset); @@ -1207,7 +1336,6 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) struct gsi *gsi = &ipa->gsi; bool suspended = false; dma_addr_t addr; - bool legacy; u32 retries; u32 len = 1; void *virt; @@ -1269,8 +1397,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) * complete the channel reset sequence. Finish by suspending the * channel again (if necessary). */ - legacy = ipa->version == IPA_VERSION_3_5_1; - gsi_channel_reset(gsi, endpoint->channel_id, legacy); + gsi_channel_reset(gsi, endpoint->channel_id, true); msleep(1); @@ -1293,21 +1420,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) u32 channel_id = endpoint->channel_id; struct ipa *ipa = endpoint->ipa; bool special; - bool legacy; int ret = 0; /* On IPA v3.5.1, if an RX endpoint is reset while aggregation * is active, we need to handle things specially to recover. * All other cases just need to reset the underlying GSI channel. - * - * IPA v3.5.1 enables the doorbell engine. Newer versions do not. 
*/ - legacy = ipa->version == IPA_VERSION_3_5_1; - special = !endpoint->toward_ipa && endpoint->data->aggregation; + special = ipa->version == IPA_VERSION_3_5_1 && + !endpoint->toward_ipa && + endpoint->data->aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); else - gsi_channel_reset(&ipa->gsi, channel_id, legacy); + gsi_channel_reset(&ipa->gsi, channel_id, true); if (ret) dev_err(&ipa->pdev->dev, @@ -1328,6 +1453,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_mode(endpoint); ipa_endpoint_init_aggr(endpoint); ipa_endpoint_init_deaggr(endpoint); + ipa_endpoint_init_rsrc_grp(endpoint); ipa_endpoint_init_seq(endpoint); ipa_endpoint_status(endpoint); } @@ -1538,8 +1664,8 @@ int ipa_endpoint_config(struct ipa *ipa) val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); /* Our RX is an IPA producer */ - rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK); - max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK); + rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); + max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); if (max > IPA_ENDPOINT_MAX) { dev_err(dev, "too many endpoints (%u > %u)\n", max, IPA_ENDPOINT_MAX); @@ -1548,7 +1674,7 @@ int ipa_endpoint_config(struct ipa *ipa) rx_mask = GENMASK(max - 1, rx_base); /* Our TX is an IPA consumer */ - max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK); + max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); tx_mask = GENMASK(max - 1, 0); ipa->available = rx_mask | tx_mask; diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h index 58a245de488e..881ecc27bd6e 100644 --- a/drivers/net/ipa/ipa_endpoint.h +++ b/drivers/net/ipa/ipa_endpoint.h @@ -25,7 +25,7 @@ struct ipa_gsi_endpoint_data; #define IPA_MTU ETH_DATA_LEN enum ipa_endpoint_name { - IPA_ENDPOINT_AP_MODEM_TX = 0, + IPA_ENDPOINT_AP_MODEM_TX, IPA_ENDPOINT_MODEM_LAN_TX, IPA_ENDPOINT_MODEM_COMMAND_TX, IPA_ENDPOINT_AP_COMMAND_TX, diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index cc1ea28f7bc2..61dd7605bcb6 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -139,12 +139,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, u32 val; /* assert(mask & ipa->available); */ - val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET); + val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET); if (enable) val |= mask; else val &= ~mask; - iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET); + iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET); } /* Enable TX_SUSPEND for an endpoint */ @@ -168,7 +168,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt) u32 val; val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET); - iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET); + iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET); } /* Simulate arrival of an IPA TX_SUSPEND interrupt */ diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index 727e9c5044d1..b5d63a0cd19e 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -13,22 +13,6 @@ struct ipa; struct ipa_interrupt; /** - * enum ipa_irq_id - IPA interrupt type - * @IPA_IRQ_UC_0: Microcontroller event interrupt - * @IPA_IRQ_UC_1: Microcontroller response interrupt - * @IPA_IRQ_TX_SUSPEND: Data ready interrupt - * - * The data ready interrupt is signaled if data has arrived that is destined - * for 
an AP RX endpoint whose underlying GSI channel is suspended/stopped. - */ -enum ipa_irq_id { - IPA_IRQ_UC_0 = 2, - IPA_IRQ_UC_1 = 3, - IPA_IRQ_TX_SUSPEND = 14, - IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */ -}; - -/** * typedef ipa_irq_handler_t - IPA interrupt handler function type * @ipa: IPA pointer * @irq_id: interrupt type diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index cd4d993b0bbb..84bb8ae92725 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -70,6 +70,14 @@ #define IPA_FWS_PATH "ipa_fws.mdt" #define IPA_PAS_ID 15 +/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */ +#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */ +#define TAG_TIMESTAMP_SHIFT 14 +#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */ + +/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */ +#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */ + /** * ipa_suspend_handler() - Handle the suspend IPA interrupt * @ipa: IPA pointer @@ -111,8 +119,7 @@ int ipa_setup(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; - /* Setup for IPA v3.5.1 has some slight differences */ - ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1); + ret = gsi_setup(&ipa->gsi); if (ret) return ret; @@ -231,8 +238,10 @@ static void ipa_hardware_config_comp(struct ipa *ipa) val &= ~IPA_QMB_SELECT_CONS_EN_FMASK; val &= ~IPA_QMB_SELECT_PROD_EN_FMASK; val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK; - } else { + } else if (ipa->version < IPA_VERSION_4_5) { val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK; + } else { + /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */ } val |= GSI_MULTI_INORDER_RD_DIS_FMASK; @@ -244,31 +253,100 @@ static void ipa_hardware_config_comp(struct ipa *ipa) /* Configure DDR and PCIe max read/write QSB values */ static void ipa_hardware_config_qsb(struct ipa *ipa) { + enum ipa_version version = ipa->version; + u32 max0; + u32 max1; u32 val; - /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */ + /* QMB_0 represents DDR; QMB_1 represents PCIe */ val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK); - if (ipa->version == IPA_VERSION_4_2) - val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK); - else - val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK); + switch (version) { + case IPA_VERSION_4_2: + max1 = 0; /* PCIe not present */ + break; + case IPA_VERSION_4_5: + max1 = 8; + break; + default: + max1 = 4; + break; + } + val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK); iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET); - if (ipa->version == IPA_VERSION_3_5_1) { - val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK); - val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK); - } else { - val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK); - if (ipa->version == IPA_VERSION_4_2) - val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK); - else - val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK); + max1 = 12; + switch (version) { + case IPA_VERSION_3_5_1: + max0 = 8; + break; + case IPA_VERSION_4_0: + case IPA_VERSION_4_1: + max0 = 12; + break; + case IPA_VERSION_4_2: + max0 = 12; + max1 = 0; /* PCIe not present */ + break; + case IPA_VERSION_4_5: + max0 = 0; /* No limit (hardware maximum) */ + break; + } + val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK); + val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK); + if (version != IPA_VERSION_3_5_1) { /* GEN_QMB_0_MAX_READS_BEATS is 0 */ /* 
GEN_QMB_1_MAX_READS_BEATS is 0 */ } iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET); } +/* IPA uses unified Qtime starting at IPA v4.5, implementing various + * timestamps and timers independent of the IPA core clock rate. The + * Qtimer is based on a 56-bit timestamp incremented at each tick of + * a 19.2 MHz SoC crystal oscillator (XO clock). + * + * For IPA timestamps (tag, NAT, data path logging) a lower resolution + * timestamp is achieved by shifting the Qtimer timestamp value right + * some number of bits to produce the low-order bits of the coarser + * granularity timestamp. + * + * For timers, a common timer clock is derived from the XO clock using + * a divider (we use 192 to produce a 100 kHz timer clock). From + * this common clock, three "pulse generators" are used to produce + * timer ticks at a configurable frequency. IPA timers (such as + * those used for aggregation or head-of-line block handling) now + * define their period based on one of these pulse generators. + */ +static void ipa_qtime_config(struct ipa *ipa) +{ + u32 val; + + /* Timer clock divider must be disabled when we change the rate */ + iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); + + /* Set DPL timestamp resolution to use Qtime (instead of 1 msec) */ + val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK); + val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK); + /* Configure tag and NAT Qtime timestamp resolution as well */ + val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK); + val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK); + iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET); + + /* Set granularity of pulse generators used for other timers */ + val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK); + val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK); + val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK); + iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET); + + /* Actual divider is 1 more than value supplied here */ + val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK); + iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); + + /* Divider value is set; re-enable the common timer clock divider */ + val |= u32_encode_bits(1, DIV_ENABLE_FMASK); + iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET); +} + static void ipa_idle_indication_cfg(struct ipa *ipa, u32 enter_idle_debounce_thresh, bool const_non_idle_enable) @@ -295,7 +373,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa, */ static void ipa_hardware_dcd_config(struct ipa *ipa) { - /* Recommended values for IPA 3.5 according to IPA HPG */ + /* Recommended values for IPA 3.5 and later according to IPA HPG */ ipa_idle_indication_cfg(ipa, 256, false); } @@ -311,22 +389,26 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa) */ static void ipa_hardware_config(struct ipa *ipa) { + enum ipa_version version = ipa->version; u32 granularity; u32 val; - /* Fill in backward-compatibility register, based on version */ - val = ipa_reg_bcr_val(ipa->version); - iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET); + /* IPA v4.5 has no backward compatibility register */ + if (version < IPA_VERSION_4_5) { + val = ipa_reg_bcr_val(version); + iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET); + } - if (ipa->version != IPA_VERSION_3_5_1) { - /* Enable open global clocks (hardware workaround) */ + /* Implement some hardware workarounds */ + if (version != IPA_VERSION_3_5_1 && version <
IPA_VERSION_4_5) { + /* Enable open global clocks (not needed for IPA v4.5) */ val = GLOBAL_FMASK; val |= GLOBAL_2X_CLK_FMASK; iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET); - /* Disable PA mask to allow HOLB drop (hardware workaround) */ + /* Disable PA mask to allow HOLB drop */ val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET); - val &= ~PA_MASK_EN; + val &= ~PA_MASK_EN_FMASK; iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET); } @@ -335,15 +417,21 @@ static void ipa_hardware_config(struct ipa *ipa) /* Configure system bus limits */ ipa_hardware_config_qsb(ipa); - /* Configure aggregation granularity */ - val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET); - granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY); - val = u32_encode_bits(granularity, AGGR_GRANULARITY); - iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET); + if (version < IPA_VERSION_4_5) { + /* Configure aggregation timer granularity */ + granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY); + val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK); + iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET); + } else { + ipa_qtime_config(ipa); + } - /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */ - if (ipa->version == IPA_VERSION_4_2) - iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET); + /* IPA v4.2 does not support hashed tables, so disable them */ + if (version == IPA_VERSION_4_2) { + u32 offset = ipa_reg_filt_rout_hash_en_offset(version); + + iowrite32(0, ipa->reg_virt + offset); + } /* Enable dynamic clock division */ ipa_hardware_dcd_config(ipa); @@ -363,52 +451,41 @@ static void ipa_hardware_deconfig(struct ipa *ipa) #ifdef IPA_VALIDATION -/* # IPA resources used based on version (see IPA_RESOURCE_GROUP_COUNT) */ -static int ipa_resource_group_count(struct ipa *ipa) -{ - switch (ipa->version) { - case IPA_VERSION_3_5_1: - return 3; - - case IPA_VERSION_4_0: - case IPA_VERSION_4_1: - return 4; - - case IPA_VERSION_4_2: - return 1; - - default: - return 0; - } -} - static bool ipa_resource_limits_valid(struct ipa *ipa, const struct ipa_resource_data *data) { - u32 group_count = ipa_resource_group_count(ipa); + u32 group_count; u32 i; u32 j; - if (!group_count) + /* We program at most 6 source or destination resource group limits */ + BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6); + + group_count = ipa_resource_group_src_count(ipa->version); + if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX) return false; - /* Return an error if a non-zero resource group limit is specified - * for a resource not supported by hardware. + /* Return an error if a non-zero resource limit is specified + * for a resource group not supported by hardware. 
*/ for (i = 0; i < data->resource_src_count; i++) { const struct ipa_resource_src *resource; resource = &data->resource_src[i]; - for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++) + for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++) if (resource->limits[j].min || resource->limits[j].max) return false; } + group_count = ipa_resource_group_dst_count(ipa->version); + if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX) + return false; + for (i = 0; i < data->resource_dst_count; i++) { const struct ipa_resource_dst *resource; resource = &data->resource_dst[i]; - for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++) + for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++) if (resource->limits[j].min || resource->limits[j].max) return false; } @@ -435,46 +512,64 @@ ipa_resource_config_common(struct ipa *ipa, u32 offset, val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK); val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK); - val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK); - val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK); + if (ylimits) { + val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK); + val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK); + } iowrite32(val, ipa->reg_virt + offset); } -static void ipa_resource_config_src_01(struct ipa *ipa, - const struct ipa_resource_src *resource) +static void ipa_resource_config_src(struct ipa *ipa, + const struct ipa_resource_src *resource) { - u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); + u32 group_count = ipa_resource_group_src_count(ipa->version); + const struct ipa_resource_limits *ylimits; + u32 offset; - ipa_resource_config_common(ipa, offset, - &resource->limits[0], &resource->limits[1]); -} + offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 1 ? NULL : &resource->limits[1]; + ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); -static void ipa_resource_config_src_23(struct ipa *ipa, - const struct ipa_resource_src *resource) -{ - u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); + if (group_count < 2) + return; - ipa_resource_config_common(ipa, offset, - &resource->limits[2], &resource->limits[3]); -} + offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 3 ? NULL : &resource->limits[3]; + ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); -static void ipa_resource_config_dst_01(struct ipa *ipa, - const struct ipa_resource_dst *resource) -{ - u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); + if (group_count < 4) + return; - ipa_resource_config_common(ipa, offset, - &resource->limits[0], &resource->limits[1]); + offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 5 ? NULL : &resource->limits[5]; + ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); } -static void ipa_resource_config_dst_23(struct ipa *ipa, - const struct ipa_resource_dst *resource) +static void ipa_resource_config_dst(struct ipa *ipa, + const struct ipa_resource_dst *resource) { - u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); + u32 group_count = ipa_resource_group_dst_count(ipa->version); + const struct ipa_resource_limits *ylimits; + u32 offset; + + offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 1 ? 
NULL : &resource->limits[1]; + ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits); - ipa_resource_config_common(ipa, offset, - &resource->limits[2], &resource->limits[3]); + if (group_count < 2) + return; + + offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 3 ? NULL : &resource->limits[3]; + ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits); + + if (group_count < 4) + return; + + offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type); + ylimits = group_count == 5 ? NULL : &resource->limits[5]; + ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits); } static int @@ -485,15 +580,11 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data) { if (!ipa_resource_limits_valid(ipa, data)) return -EINVAL; - for (i = 0; i < data->resource_src_count; i++) { - ipa_resource_config_src_01(ipa, &data->resource_src[i]); - ipa_resource_config_src_23(ipa, &data->resource_src[i]); - } + for (i = 0; i < data->resource_src_count; i++) + ipa_resource_config_src(ipa, &data->resource_src[i]); - for (i = 0; i < data->resource_dst_count; i++) { - ipa_resource_config_dst_01(ipa, &data->resource_dst[i]); - ipa_resource_config_dst_23(ipa, &data->resource_dst[i]); - } + for (i = 0; i < data->resource_dst_count; i++) + ipa_resource_config_dst(ipa, &data->resource_dst[i]); return 0; } @@ -678,16 +769,13 @@ static void ipa_validate_build(void) */ BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX); - /* Exceeding 128 bytes makes the transaction pool *much* larger */ - BUILD_BUG_ON(sizeof(struct gsi_trans) > 128); - /* This is used as a divisor */ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY); /* Aggregation granularity value can't be 0, and must fit */ BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY)); BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) > - field_max(AGGR_GRANULARITY)); + field_max(AGGR_GRANULARITY_FMASK)); #endif /* IPA_VALIDATE */ } @@ -720,15 +808,21 @@ static int ipa_probe(struct platform_device *pdev) const struct ipa_data *data; struct ipa_clock *clock; struct rproc *rproc; - bool modem_alloc; bool modem_init; struct ipa *ipa; - bool prefetch; phandle ph; int ret; ipa_validate_build(); + /* Get configuration data early; needed for clock initialization */ + data = of_device_get_match_data(dev); + if (!data) { + /* This is really IPA_VALIDATE (should never happen) */ + dev_err(dev, "matched hardware not supported\n"); + return -ENODEV; + } + /* If we need Trust Zone, make sure it's available */ modem_init = of_property_read_bool(dev->of_node, "modem-init"); if (!modem_init) @@ -749,22 +843,13 @@ static int ipa_probe(struct platform_device *pdev) /* The clock and interconnects might not be ready when we're * probed, so might return -EPROBE_DEFER. */ - clock = ipa_clock_init(dev); + clock = ipa_clock_init(dev, data->clock_data); if (IS_ERR(clock)) { ret = PTR_ERR(clock); goto err_rproc_put; } - /* No more EPROBE_DEFER. Get our configuration data */ - data = of_device_get_match_data(dev); - if (!data) { - /* This is really IPA_VALIDATE (should never happen) */ - dev_err(dev, "matched hardware not supported\n"); - ret = -ENOTSUPP; - goto err_clock_exit; - } - - /* Allocate and initialize the IPA structure */ + /* No more EPROBE_DEFER.
Allocate and initialize the IPA structure */ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); if (!ipa) { ret = -ENOMEM; @@ -785,17 +870,12 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_reg_exit; - /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */ - prefetch = ipa->version != IPA_VERSION_3_5_1; - /* IPA v4.2 requires the AP to allocate channels for the modem */ - modem_alloc = ipa->version == IPA_VERSION_4_2; - - ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count, - data->endpoint_data, modem_alloc); + ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count, + data->endpoint_data); if (ret) goto err_mem_exit; - /* Result is a non-zero mask endpoints that support filtering */ + /* Result is a non-zero mask of endpoints that support filtering */ ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count, data->endpoint_data); if (!ipa->filter_map) { @@ -870,6 +950,11 @@ static int ipa_remove(struct platform_device *pdev) if (ipa->setup_complete) { ret = ipa_modem_stop(ipa); + /* If starting or stopping is in progress, try once more */ + if (ret == -EBUSY) { + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + ret = ipa_modem_stop(ipa); + } if (ret) return ret; @@ -890,6 +975,15 @@ static int ipa_remove(struct platform_device *pdev) return 0; } +static void ipa_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = ipa_remove(pdev); + if (ret) + dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret); +} + /** * ipa_suspend() - Power management system suspend callback * @dev: IPA device structure @@ -947,8 +1041,9 @@ static const struct dev_pm_ops ipa_pm_ops = { }; static struct platform_driver ipa_driver = { - .probe = ipa_probe, - .remove = ipa_remove, + .probe = ipa_probe, + .remove = ipa_remove, + .shutdown = ipa_shutdown, .driver = { .name = "ipa", .pm = &ipa_pm_ops, diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 2d45c444a67f..0cc3a3374caa 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -89,7 +89,7 @@ int ipa_mem_setup(struct ipa *ipa) gsi_trans_commit_wait(trans); /* Tell the hardware where the processing context area is located */ - iowrite32(ipa->mem_offset + offset, + iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET); return 0; @@ -160,13 +160,13 @@ int ipa_mem_config(struct ipa *ipa) mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK); /* If the sizes don't match, issue a warning */ - if (ipa->mem_offset + mem_size > ipa->mem_size) { - dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n", - mem_size); - } else if (ipa->mem_offset + mem_size < ipa->mem_size) { + if (ipa->mem_offset + mem_size < ipa->mem_size) { dev_warn(dev, "limiting IPA memory size to 0x%08x\n", mem_size); ipa->mem_size = mem_size; + } else if (ipa->mem_offset + mem_size > ipa->mem_size) { + dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n", + mem_size); } /* Prealloc DMA memory for zeroing regions */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 5090f0f923ad..2fc64483f275 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -168,7 +168,7 @@ static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node) ipa_qmi->indication_sent = false; } -static struct qmi_ops ipa_server_ops = { +static const struct qmi_ops ipa_server_ops = { .bye = ipa_server_bye, }; @@ -234,7 +234,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi, } 
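A side note on the const-ification of the QMI tables in this file: qmi_handle_init() takes const pointers for both the ops structure and the message handler array, so these tables can live in read-only data. A minimal sketch of how they are passed (example_qmi_setup() and its max_msg_len parameter are hypothetical, shown only to illustrate the call):

/* Hypothetical wrapper, not a function in this driver */
static int example_qmi_setup(struct ipa_qmi *ipa_qmi, size_t max_msg_len)
{
	/* qmi_handle_init() only reads the ops and handler tables,
	 * which is what allows them to be declared const.
	 */
	return qmi_handle_init(&ipa_qmi->server_handle, max_msg_len,
			       &ipa_server_ops, ipa_server_msg_handlers);
}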
/* The server handles two request message types sent by the modem. */ -static struct qmi_msg_handler ipa_server_msg_handlers[] = { +static const struct qmi_msg_handler ipa_server_msg_handlers[] = { { .type = QMI_REQUEST, .msg_id = IPA_QMI_INDICATION_REGISTER, @@ -261,7 +261,7 @@ static void ipa_client_init_driver(struct qmi_handle *qmi, } /* The client handles one response message type sent by the modem. */ -static struct qmi_msg_handler ipa_client_msg_handlers[] = { +static const struct qmi_msg_handler ipa_client_msg_handlers[] = { { .type = QMI_RESPONSE, .msg_id = IPA_QMI_INIT_DRIVER, @@ -413,7 +413,7 @@ static void ipa_client_init_driver_work(struct work_struct *work) int ret; ipa_qmi = container_of(work, struct ipa_qmi, init_driver_work); - qmi = &ipa_qmi->client_handle, + qmi = &ipa_qmi->client_handle; ipa = container_of(ipa_qmi, struct ipa, qmi); dev = &ipa->pdev->dev; @@ -463,7 +463,7 @@ ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc) return 0; } -static struct qmi_ops ipa_client_ops = { +static const struct qmi_ops ipa_client_ops = { .new_server = ipa_client_new_server, }; diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h index cfac456cea0c..12b6621f4b0e 100644 --- a/drivers/net/ipa/ipa_qmi_msg.h +++ b/drivers/net/ipa/ipa_qmi_msg.h @@ -74,12 +74,12 @@ struct ipa_init_complete_ind { /* The AP tells the modem its platform type. We assume Android. */ enum ipa_platform_type { - IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */ - IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */ - IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */ - IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */ - IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */ - IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */ + IPA_QMI_PLATFORM_TYPE_INVALID = 0x0, /* Invalid */ + IPA_QMI_PLATFORM_TYPE_TN = 0x1, /* Data card */ + IPA_QMI_PLATFORM_TYPE_LE = 0x2, /* Data router */ + IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 0x3, /* Android MSM */ + IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 0x4, /* Windows MSM */ + IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */ }; /* This defines the start and end offset of a range of memory. Both diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index e542598fd775..e6b0827a244e 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -65,14 +65,15 @@ struct ipa; * of valid bits for the register. 
*/ -#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038 - #define IPA_REG_COMP_CFG_OFFSET 0x0000003c +/* The next field is not supported for IPA v4.1 */ #define ENABLE_FMASK GENMASK(0, 0) #define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1) #define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2) #define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3) +/* The next field is not present for IPA v4.5 */ #define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4) +/* The remaining fields are not present for IPA v3.5.1 */ #define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5) #define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6) #define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7) @@ -86,6 +87,8 @@ struct ipa; #define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15) #define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16) #define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17) +/* The next field is present for IPA v4.5 */ +#define IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN_FMASK GENMASK(21, 21) #define IPA_REG_CLKON_CFG_OFFSET 0x00000044 #define RX_FMASK GENMASK(0, 0) @@ -105,11 +108,13 @@ struct ipa; #define ACK_MNGR_FMASK GENMASK(14, 14) #define D_DCPH_FMASK GENMASK(15, 15) #define H_DCPH_FMASK GENMASK(16, 16) +/* The next field is not present for IPA v4.5 */ #define DCMP_FMASK GENMASK(17, 17) #define NTF_TX_CMDQS_FMASK GENMASK(18, 18) #define TX_0_FMASK GENMASK(19, 19) #define TX_1_FMASK GENMASK(20, 20) #define FNR_FMASK GENMASK(21, 21) +/* The remaining fields are not present for IPA v3.5.1 */ #define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22) #define AGGR_WRAPPER_FMASK GENMASK(23, 23) #define RAM_SLAVEWAY_FMASK GENMASK(24, 24) @@ -118,6 +123,8 @@ struct ipa; #define GSI_IF_FMASK GENMASK(27, 27) #define GLOBAL_FMASK GENMASK(28, 28) #define GLOBAL_2X_CLK_FMASK GENMASK(29, 29) +/* The next field is present for IPA v4.5 */ +#define DPL_FIFO_FMASK GENMASK(30, 30) #define IPA_REG_ROUTE_OFFSET 0x00000048 #define ROUTE_DIS_FMASK GENMASK(0, 0) @@ -138,25 +145,17 @@ struct ipa; #define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078 #define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0) #define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4) -/* The next two fields are present for IPA v4.0 and above */ +/* The next two fields are not present for IPA v3.5.1 */ #define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16) #define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24) -static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version) +static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version) { if (version == IPA_VERSION_3_5_1) - return 0x0000010c; + return 0x000008c; - return 0x000000b4; + return 0x0000148; } -/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */ - -/* The next register is present for IPA v4.2 and above */ -#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148 -#define IPV6_ROUTER_HASH_EN GENMASK(0, 0) -#define IPV6_FILTER_HASH_EN GENMASK(4, 4) -#define IPV4_ROUTER_HASH_EN GENMASK(8, 8) -#define IPV4_FILTER_HASH_EN GENMASK(12, 12) static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version) { @@ -166,76 +165,108 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version) return 0x000014c; } -#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0) -#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4) -#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8) -#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12) +/* The next four fields are used for the hash enable and flush registers */ +#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0) +#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4) +#define 
IPV4_ROUTER_HASH_FMASK GENMASK(8, 8) +#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12) + +/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */ +static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version) +{ + if (version == IPA_VERSION_3_5_1) + return 0x0000010c; + + return 0x000000b4; +} +/* The next register is not present for IPA v4.5 */ #define IPA_REG_BCR_OFFSET 0x000001d0 -#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0) -#define BCR_TX_NOT_USING_BRESP BIT(1) -#define BCR_SUSPEND_L2_IRQ BIT(3) -#define BCR_HOLB_DROP_L2_IRQ BIT(4) -#define BCR_DUAL_TX BIT(5) +/* The next two fields are not present for IPA v4.2 */ +#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0) +#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1) +/* The next field is invalid for IPA v4.1 */ +#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2) +/* The next two fields are not present for IPA v4.2 */ +#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3) +#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4) +#define BCR_DUAL_TX_FMASK GENMASK(5, 5) +#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6) +#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7) +#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8) +#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9) /* Backward compatibility register value to use for each version */ static inline u32 ipa_reg_bcr_val(enum ipa_version version) { if (version == IPA_VERSION_3_5_1) - return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP | - BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX; + return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | + BCR_TX_NOT_USING_BRESP_FMASK | + BCR_SUSPEND_L2_IRQ_FMASK | + BCR_HOLB_DROP_L2_IRQ_FMASK | + BCR_DUAL_TX_FMASK; if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1) - return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ | - BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX; + return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK | + BCR_SUSPEND_L2_IRQ_FMASK | + BCR_HOLB_DROP_L2_IRQ_FMASK | + BCR_DUAL_TX_FMASK; + + /* assert(version != IPA_VERSION_4_5); */ return 0x00000000; } +/* The value of the next register must be a multiple of 8 */ #define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8 -#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec /* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */ +#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec + +/* The next register is not present for IPA v4.5 */ +#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0 +#define AGGR_GRANULARITY_FMASK GENMASK(8, 4) /* The internal inactivity timer clock is used for the aggregation timer */ -#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ +#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ -#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0 -#define AGGR_GRANULARITY GENMASK(8, 4) /* Compute the value to use in the AGGR_GRANULARITY field representing the * given number of microseconds. The value is one less than the number of - * timer ticks in the requested period. Zero not a valid granularity value. + * timer ticks in the requested period. 0 is not a valid granularity value.
*/ static inline u32 ipa_aggr_granularity_val(u32 usec) { return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; } +/* The next register is not present for IPA v4.5 */ #define IPA_REG_TX_CFG_OFFSET 0x000001fc /* The first three fields are present for IPA v3.5.1 only */ -#define TX0_PREFETCH_DISABLE GENMASK(0, 0) -#define TX1_PREFETCH_DISABLE GENMASK(1, 1) -#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2) -/* The next fields are present for IPA v4.0 and above */ -#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2) -#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6) -#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10) -#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11) -#define PA_MASK_EN GENMASK(12, 12) -#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13) -/* The last two fields are present for IPA v4.2 and above */ -#define SSPND_PA_NO_START_STATE GENMASK(18, 18) -#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19) +#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0) +#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1) +#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2) +/* The next six fields are present for IPA v4.0 and above */ +#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2) +#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6) +#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10) +#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11) +#define PA_MASK_EN_FMASK GENMASK(12, 12) +#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13) +/* The next field is present for IPA v4.5 */ +#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17) +/* The next two fields are present for IPA v4.2 only */ +#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18) +#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19) #define IPA_REG_FLAVOR_0_OFFSET 0x00000210 -#define BAM_MAX_PIPES_FMASK GENMASK(4, 0) -#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8) -#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16) -#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24) +#define IPA_MAX_PIPES_FMASK GENMASK(3, 0) +#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8) +#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16) +#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24) static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) { - if (version == IPA_VERSION_4_2) + if (version >= IPA_VERSION_4_2) return 0x00000240; return 0x00000220; @@ -244,25 +275,102 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0) #define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16) +/* The next register is present for IPA v4.5 */ +#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c +#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0) +#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7) +#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8) +#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16) + +/* The next register is present for IPA v4.5 */ +#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250 +#define DIV_VALUE_FMASK GENMASK(8, 0) +#define DIV_ENABLE_FMASK GENMASK(31, 31) + +/* The next register is present for IPA v4.5 */ +#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254 +#define GRAN_0_FMASK GENMASK(2, 0) +#define GRAN_1_FMASK GENMASK(5, 3) +#define GRAN_2_FMASK GENMASK(8, 6) +/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */ +enum ipa_pulse_gran { + IPA_GRAN_10_US = 0x0, + IPA_GRAN_20_US = 0x1, + IPA_GRAN_50_US = 0x2, + IPA_GRAN_100_US = 0x3, + IPA_GRAN_1_MS = 0x4, + IPA_GRAN_10_MS = 0x5, + IPA_GRAN_100_MS = 0x6, + IPA_GRAN_655350_US = 0x7, +}; + +/* # IPA 
source resource groups available based on version */ +static inline u32 ipa_resource_group_src_count(enum ipa_version version) +{ + switch (version) { + case IPA_VERSION_3_5_1: + case IPA_VERSION_4_0: + case IPA_VERSION_4_1: + return 4; + + case IPA_VERSION_4_2: + return 1; + + case IPA_VERSION_4_5: + return 5; + + default: + return 0; + } +} + +/* # IPA destination resource groups available based on version */ +static inline u32 ipa_resource_group_dst_count(enum ipa_version version) +{ + switch (version) { + case IPA_VERSION_3_5_1: + return 3; + + case IPA_VERSION_4_0: + case IPA_VERSION_4_1: + return 4; + + case IPA_VERSION_4_2: + return 1; + + case IPA_VERSION_4_5: + return 5; + + default: + return 0; + } +} + +/* Not all of the following are valid (depends on the count, above) */ #define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \ (0x00000400 + 0x0020 * (rt)) #define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \ (0x00000404 + 0x0020 * (rt)) +/* The next register is only present for IPA v4.5 */ #define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \ (0x00000408 + 0x0020 * (rt)) #define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \ (0x00000500 + 0x0020 * (rt)) #define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \ (0x00000504 + 0x0020 * (rt)) +/* The next register is only present for IPA v4.5 */ #define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \ (0x00000508 + 0x0020 * (rt)) +/* The next four fields are used for all resource group registers */ #define X_MIN_LIM_FMASK GENMASK(5, 0) #define X_MAX_LIM_FMASK GENMASK(13, 8) +/* The next two fields are not always present (if resource count is odd) */ #define Y_MIN_LIM_FMASK GENMASK(21, 16) #define Y_MAX_LIM_FMASK GENMASK(29, 24) #define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \ (0x00000800 + 0x0070 * (ep)) +/* The next field should only be used for IPA v3.5.1 */ #define ENDP_SUSPEND_FMASK GENMASK(0, 0) #define ENDP_DELAY_FMASK GENMASK(1, 1) @@ -273,6 +381,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3) #define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8) +/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */ +enum ipa_cs_offload_en { + IPA_CS_OFFLOAD_NONE = 0x0, + IPA_CS_OFFLOAD_UL = 0x1, + IPA_CS_OFFLOAD_DL = 0x2, +}; + #define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \ (0x00000810 + 0x0070 * (ep)) #define HDR_LEN_FMASK GENMASK(5, 0) @@ -283,7 +398,45 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20) #define HDR_A5_MUX_FMASK GENMASK(26, 26) #define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27) +/* The next field is not present for IPA v4.5 */ #define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28) +/* The next two fields are present for IPA v4.5 */ +#define HDR_LEN_MSB_FMASK GENMASK(29, 28) +#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30) + +/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */ +static inline u32 ipa_header_size_encoded(enum ipa_version version, + u32 header_size) +{ + u32 val; + + val = u32_encode_bits(header_size, HDR_LEN_FMASK); + if (version < IPA_VERSION_4_5) + return val; + + /* IPA v4.5 adds a few more most-significant bits */ + header_size >>= hweight32(HDR_LEN_FMASK); + val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK); + + return val; +} + +/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */ +static inline u32 ipa_metadata_offset_encoded(enum ipa_version version, + u32 offset) +{ + u32 val; + +
val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK); + if (version < IPA_VERSION_4_5) + return val; + + /* IPA v4.5 adds a few more most-significant bits */ + offset >>= hweight32(HDR_OFST_METADATA_FMASK); + val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK); + + return val; +} #define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \ (0x00000814 + 0x0070 * (ep)) @@ -293,6 +446,10 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3) #define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4) #define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10) +/* The next three fields are present for IPA v4.5 */ +#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16) +#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18) +#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20) /* Valid only for RX (IPA producer) endpoints */ #define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \ @@ -302,22 +459,77 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \ (0x00000820 + 0x0070 * (txep)) #define MODE_FMASK GENMASK(2, 0) +/* The next field is present for IPA v4.5 */ +#define DCPH_ENABLE_FMASK GENMASK(3, 3) #define DEST_PIPE_INDEX_FMASK GENMASK(8, 4) #define BYTE_THRESHOLD_FMASK GENMASK(27, 12) #define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28) #define PAD_EN_FMASK GENMASK(29, 29) +/* The next field is not present for IPA v4.5 */ #define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30) +/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */ +enum ipa_mode { + IPA_BASIC = 0x0, + IPA_ENABLE_FRAMING_HDLC = 0x1, + IPA_ENABLE_DEFRAMING_HDLC = 0x2, + IPA_DMA = 0x3, +}; + #define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \ (0x00000824 + 0x0070 * (ep)) #define AGGR_EN_FMASK GENMASK(1, 0) #define AGGR_TYPE_FMASK GENMASK(4, 2) -#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5) -#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10) -#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15) -#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21) -#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22) -#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24) +static inline u32 aggr_byte_limit_fmask(bool legacy) +{ + return legacy ? GENMASK(9, 5) : GENMASK(10, 5); +} + +static inline u32 aggr_time_limit_fmask(bool legacy) +{ + return legacy ? GENMASK(14, 10) : GENMASK(16, 12); +} + +static inline u32 aggr_pkt_limit_fmask(bool legacy) +{ + return legacy ? GENMASK(20, 15) : GENMASK(22, 17); +} + +static inline u32 aggr_sw_eof_active_fmask(bool legacy) +{ + return legacy ? GENMASK(21, 21) : GENMASK(23, 23); +} + +static inline u32 aggr_force_close_fmask(bool legacy) +{ + return legacy ? GENMASK(22, 22) : GENMASK(24, 24); +} + +static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy) +{ + return legacy ?
GENMASK(24, 24) : GENMASK(26, 26); +} + +/* The next field is present for IPA v4.5 */ +#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27) + +/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */ +enum ipa_aggr_en { + IPA_BYPASS_AGGR = 0x0, + IPA_ENABLE_AGGR = 0x1, + IPA_ENABLE_DEAGGR = 0x2, +}; + +/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */ +enum ipa_aggr_type { + IPA_MBIM_16 = 0x0, + IPA_HDLC = 0x1, + IPA_TLP = 0x2, + IPA_RNDIS = 0x3, + IPA_GENERIC = 0x4, + IPA_COALESCE = 0x5, + IPA_QCMAP = 0x6, +}; /* Valid only for RX (IPA producer) endpoints */ #define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \ @@ -327,21 +539,37 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) /* Valid only for RX (IPA producer) endpoints */ #define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \ (0x00000830 + 0x0070 * (rxep)) -/* The next fields are present for IPA v4.2 only */ +/* The next two fields are present for IPA v4.2 only */ #define BASE_VALUE_FMASK GENMASK(4, 0) #define SCALE_FMASK GENMASK(12, 8) +/* The next two fields are present for IPA v4.5 */ +#define TIME_LIMIT_FMASK GENMASK(4, 0) +#define GRAN_SEL_FMASK GENMASK(8, 8) /* Valid only for TX (IPA consumer) endpoints */ #define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \ (0x00000834 + 0x0070 * (txep)) #define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0) +#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6) #define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7) #define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8) +#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14) #define MAX_PACKET_LEN_FMASK GENMASK(31, 16) #define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \ (0x00000838 + 0x0070 * (ep)) -#define RSRC_GRP_FMASK GENMASK(1, 0) +/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */ +static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp) +{ + switch (version) { + case IPA_VERSION_4_2: + return u32_encode_bits(rsrc_grp, GENMASK(0, 0)); + case IPA_VERSION_4_5: + return u32_encode_bits(rsrc_grp, GENMASK(2, 0)); + default: + return u32_encode_bits(rsrc_grp, GENMASK(1, 0)); + } +} /* Valid only for TX (IPA consumer) endpoints */ #define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \ @@ -351,15 +579,35 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8) #define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12) +/** + * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N + * @IPA_SEQ_DMA_ONLY: only DMA is performed + * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP: + * second packet processing pass + no decipher + microcontroller + * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP: + * packet processing + no decipher + no uCP + HPS REP DMA parser + * @IPA_SEQ_INVALID: invalid sequencer type + * + * The values defined here are broken into 4-bit nibbles that are written + * into fields of the ENDP_INIT_SEQ registers. 
+ */ +enum ipa_seq_type { + IPA_SEQ_DMA_ONLY = 0x0000, + IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004, + IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806, + IPA_SEQ_INVALID = 0xffff, +}; + #define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \ (0x00000840 + 0x0070 * (ep)) #define STATUS_EN_FMASK GENMASK(0, 0) #define STATUS_ENDP_FMASK GENMASK(5, 1) +/* The next field is not present for IPA v4.5 */ #define STATUS_LOCATION_FMASK GENMASK(8, 8) -/* The next field is present for IPA v4.0 and above */ +/* The next field is not present for IPA v3.5.1 */ #define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9) -/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */ +/* The next register is only present for IPA versions that support hashing */ #define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \ (0x0000085c + 0x0070 * (er)) #define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0) @@ -394,89 +642,69 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP) #define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \ (0x00003010 + 0x1000 * (ee)) +/** + * enum ipa_irq_id - Bit positions representing type of IPA IRQ + * @IPA_IRQ_UC_0: Microcontroller event interrupt + * @IPA_IRQ_UC_1: Microcontroller response interrupt + * @IPA_IRQ_TX_SUSPEND: Data ready interrupt + * + * IRQ types not described above are not currently used. + */ +enum ipa_irq_id { + IPA_IRQ_BAD_SNOC_ACCESS = 0x0, + /* Type (bit) 0x1 is not defined */ + IPA_IRQ_UC_0 = 0x2, + IPA_IRQ_UC_1 = 0x3, + IPA_IRQ_UC_2 = 0x4, + IPA_IRQ_UC_3 = 0x5, + IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6, + IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7, + IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8, + IPA_IRQ_RX_ERR = 0x9, + IPA_IRQ_DEAGGR_ERR = 0xa, + IPA_IRQ_TX_ERR = 0xb, + IPA_IRQ_STEP_MODE = 0xc, + IPA_IRQ_PROC_ERR = 0xd, + IPA_IRQ_TX_SUSPEND = 0xe, + IPA_IRQ_TX_HOLB_DROP = 0xf, + IPA_IRQ_BAM_GSI_IDLE = 0x10, + IPA_IRQ_PIPE_YELLOW_BELOW = 0x11, + IPA_IRQ_PIPE_RED_BELOW = 0x12, + IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13, + IPA_IRQ_PIPE_RED_ABOVE = 0x14, + IPA_IRQ_UCP = 0x15, + IPA_IRQ_DCMP = 0x16, + IPA_IRQ_GSI_EE = 0x17, + IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18, + IPA_IRQ_GSI_UC = 0x19, + /* The next bit is present for IPA v4.5 */ + IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a, + IPA_IRQ_COUNT, /* Last; not an id */ +}; #define IPA_REG_IRQ_UC_OFFSET \ IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP) #define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \ (0x0000301c + 0x1000 * (ee)) +#define UC_INTR_FMASK GENMASK(0, 0) +/* ipa->available defines the valid bits in the SUSPEND_INFO register */ #define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \ IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP) #define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \ (0x00003030 + 0x1000 * (ee)) -/* ipa->available defines the valid bits in the SUSPEND_INFO register */ -#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \ - IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \ +/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */ +#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \ + IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP) +#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \ (0x00003034 + 0x1000 * (ee)) -/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */ -#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \ - IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP) -#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \ +/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */ +#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \ + IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP) 
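As a usage illustration for the version-aware encoder added above, here is a minimal caller sketch (not part of the patch; the helper name is hypothetical, and it assumes struct ipa carries its version and mapped register base, as the register writes elsewhere in this series do):

static void ipa_endpoint_program_rsrc_grp(struct ipa *ipa, u32 endpoint_id,
					  u32 rsrc_grp)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint_id);

	/* rsrc_grp_encoded() selects the field width for this IPA version */
	iowrite32(rsrc_grp_encoded(ipa->version, rsrc_grp),
		  ipa->reg_virt + offset);
}

Encoding through a helper rather than a fixed GENMASK() is what lets one header serve IPA v3.5.1 through v4.5.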
+#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \ (0x00003038 + 0x1000 * (ee)) -/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */ - -/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */ -enum ipa_cs_offload_en { - IPA_CS_OFFLOAD_NONE = 0, - IPA_CS_OFFLOAD_UL = 1, - IPA_CS_OFFLOAD_DL = 2, - IPA_CS_RSVD -}; - -/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */ -enum ipa_aggr_en { - IPA_BYPASS_AGGR = 0, - IPA_ENABLE_AGGR = 1, - IPA_ENABLE_DEAGGR = 2, -}; - -/** enum ipa_aggr_type - aggregation type field in in_ENDP_INIT_AGGR_N */ -enum ipa_aggr_type { - IPA_MBIM_16 = 0, - IPA_HDLC = 1, - IPA_TLP = 2, - IPA_RNDIS = 3, - IPA_GENERIC = 4, - IPA_COALESCE = 5, - IPA_QCMAP = 6, -}; - -/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */ -enum ipa_mode { - IPA_BASIC = 0, - IPA_ENABLE_FRAMING_HDLC = 1, - IPA_ENABLE_DEFRAMING_HDLC = 2, - IPA_DMA = 3, -}; - -/** - * enum ipa_seq_type - HPS and DPS sequencer type fields in in ENDP_INIT_SEQ_N - * @IPA_SEQ_DMA_ONLY: only DMA is performed - * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP: - * packet processing + no decipher + microcontroller (Ethernet Bridging) - * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP: - * second packet processing pass + no decipher + microcontroller - * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher - * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression - * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP: - * packet processing + no decipher + no uCP + HPS REP DMA parser - * @IPA_SEQ_INVALID: invalid sequencer type - * - * The values defined here are broken into 4-bit nibbles that are written - * into fields of the INIT_SEQ_N endpoint registers. - */ -enum ipa_seq_type { - IPA_SEQ_DMA_ONLY = 0x0000, - IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002, - IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004, - IPA_SEQ_DMA_DEC = 0x0011, - IPA_SEQ_DMA_COMP_DECOMP = 0x0020, - IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806, - IPA_SEQ_INVALID = 0xffff, -}; int ipa_reg_init(struct ipa *ipa); void ipa_reg_exit(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index b3790aa952a1..32e2d3e052d5 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -422,8 +422,8 @@ int ipa_table_hash_flush(struct ipa *ipa) return -EBUSY; } - val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH; - val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH; + val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK; + val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK; ipa_cmd_register_write_add(trans, offset, val, val, false); diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index b382d47bc70d..dee58a6596d4 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -86,32 +86,32 @@ struct ipa_uc_mem_area { /** enum ipa_uc_command - commands from the AP to the microcontroller */ enum ipa_uc_command { - IPA_UC_COMMAND_NO_OP = 0, - IPA_UC_COMMAND_UPDATE_FLAGS = 1, - IPA_UC_COMMAND_DEBUG_RUN_TEST = 2, - IPA_UC_COMMAND_DEBUG_GET_INFO = 3, - IPA_UC_COMMAND_ERR_FATAL = 4, - IPA_UC_COMMAND_CLK_GATE = 5, - IPA_UC_COMMAND_CLK_UNGATE = 6, - IPA_UC_COMMAND_MEMCPY = 7, - IPA_UC_COMMAND_RESET_PIPE = 8, - IPA_UC_COMMAND_REG_WRITE = 9, - IPA_UC_COMMAND_GSI_CH_EMPTY = 10, + IPA_UC_COMMAND_NO_OP = 0x0, + IPA_UC_COMMAND_UPDATE_FLAGS = 0x1, + IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2, + IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3, + IPA_UC_COMMAND_ERR_FATAL = 0x4, + IPA_UC_COMMAND_CLK_GATE = 0x5, + IPA_UC_COMMAND_CLK_UNGATE = 0x6, + IPA_UC_COMMAND_MEMCPY = 0x7, + 
IPA_UC_COMMAND_RESET_PIPE = 0x8, + IPA_UC_COMMAND_REG_WRITE = 0x9, + IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa, }; /** enum ipa_uc_response - microcontroller response codes */ enum ipa_uc_response { - IPA_UC_RESPONSE_NO_OP = 0, - IPA_UC_RESPONSE_INIT_COMPLETED = 1, - IPA_UC_RESPONSE_CMD_COMPLETED = 2, - IPA_UC_RESPONSE_DEBUG_GET_INFO = 3, + IPA_UC_RESPONSE_NO_OP = 0x0, + IPA_UC_RESPONSE_INIT_COMPLETED = 0x1, + IPA_UC_RESPONSE_CMD_COMPLETED = 0x2, + IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3, }; /** enum ipa_uc_event - common cpu events reported by the microcontroller */ enum ipa_uc_event { - IPA_UC_EVENT_NO_OP = 0, - IPA_UC_EVENT_ERROR = 1, - IPA_UC_EVENT_LOG_INFO = 2, + IPA_UC_EVENT_NO_OP = 0x0, + IPA_UC_EVENT_ERROR = 0x1, + IPA_UC_EVENT_LOG_INFO = 0x2, }; static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa) @@ -129,9 +129,10 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) if (shared->event == IPA_UC_EVENT_ERROR) dev_err(dev, "microcontroller error event\n"); - else + else if (shared->event != IPA_UC_EVENT_LOG_INFO) dev_err(dev, "unsupported microcontroller event %hhu\n", shared->event); + /* The LOG_INFO event can be safely ignored */ } /* Microcontroller response IPA interrupt handler */ @@ -191,14 +192,19 @@ void ipa_uc_teardown(struct ipa *ipa) static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param) { struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa); + u32 val; + /* Fill in the command data */ shared->command = command; shared->command_param = cpu_to_le32(command_param); shared->command_param_hi = 0; shared->response = 0; shared->response_param = 0; - iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET); + /* Use an interrupt to tell the microcontroller the command is ready */ + val = u32_encode_bits(1, UC_INTR_FMASK); + + iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET); } /* Tell the microcontroller the AP is shutting down */ diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h index 85449df0f512..2944e2a89023 100644 --- a/drivers/net/ipa/ipa_version.h +++ b/drivers/net/ipa/ipa_version.h @@ -18,6 +18,7 @@ enum ipa_version { IPA_VERSION_4_0, /* GSI version 2.0 */ IPA_VERSION_4_1, /* GSI version 2.1 */ IPA_VERSION_4_2, /* GSI version 2.2 */ + IPA_VERSION_4_5, /* GSI version 2.5 */ }; #endif /* _IPA_VERSION_H_ */ diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 60b7d93bb834..a707502a0c0f 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -2,6 +2,8 @@ /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> */ +#include <linux/ethtool.h> + #include "ipvlan.h" static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 11ca5fa902a1..92425e1fd70c 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -101,6 +101,7 @@ struct pcpu_secy_stats { * @real_dev: pointer to underlying netdevice * @stats: MACsec device stats * @secys: linked list of SecY's on the underlying device + * @gro_cells: pointer to the Generic Receive Offload cell * @offload: status of offloading on the MACsec device */ struct macsec_dev { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index c8d803d3616c..fb51329f8964 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -35,7 +35,7 @@ #define MACVLAN_HASH_BITS 8 #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) -#define MACVLAN_BC_QUEUE_LEN 1000 +#define MACVLAN_DEFAULT_BC_QUEUE_LEN 1000 #define MACVLAN_F_PASSTHRU 1 #define 
MACVLAN_F_ADDRCHANGE 2 @@ -46,6 +46,7 @@ struct macvlan_port { struct list_head vlans; struct sk_buff_head bc_queue; struct work_struct bc_work; + u32 bc_queue_len_used; u32 flags; int count; struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; @@ -67,6 +68,7 @@ struct macvlan_skb_cb { #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) static void macvlan_port_destroy(struct net_device *dev); +static void update_port_bc_queue_len(struct macvlan_port *port); static inline bool macvlan_passthru(const struct macvlan_port *port) { @@ -354,7 +356,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, MACVLAN_SKB_CB(nskb)->src = src; spin_lock(&port->bc_queue.lock); - if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { + if (skb_queue_len(&port->bc_queue) < port->bc_queue_len_used) { if (src) dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); @@ -1096,7 +1098,7 @@ static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *real_dev = vlan->lowerdev; struct netpoll *netpoll; - int err = 0; + int err; netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); err = -ENOMEM; @@ -1218,6 +1220,7 @@ static int macvlan_port_create(struct net_device *dev) for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_source_hash[i]); + port->bc_queue_len_used = 0; skb_queue_head_init(&port->bc_queue); INIT_WORK(&port->bc_work, macvlan_process_broadcast); @@ -1339,7 +1342,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } -/** +/* * reconfigure list of remote source mac address * (only for macvlan devices in source mode) * Note regarding alignment: all netlink data is aligned to 4 Byte, which @@ -1486,6 +1489,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, goto destroy_macvlan_port; } + vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN; + if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) + vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); + err = register_netdevice(dev); if (err < 0) goto destroy_macvlan_port; @@ -1496,6 +1503,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, goto unregister_netdev; list_add_tail_rcu(&vlan->list, &port->vlans); + update_port_bc_queue_len(vlan->port); netif_stacked_transfer_operstate(lowerdev, dev); linkwatch_fire_event(dev); @@ -1529,6 +1537,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) if (vlan->mode == MACVLAN_MODE_SOURCE) macvlan_flush_sources(vlan->port, vlan); list_del_rcu(&vlan->list); + update_port_bc_queue_len(vlan->port); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } @@ -1572,6 +1581,12 @@ static int macvlan_changelink(struct net_device *dev, } vlan->flags = flags; } + + if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) { + vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]); + update_port_bc_queue_len(vlan->port); + } + if (set_mode) vlan->mode = mode; if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { @@ -1602,6 +1617,8 @@ static size_t macvlan_get_size(const struct net_device *dev) + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */ + nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */ + macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */ + + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */ + + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */ ); } @@ -1625,6 +1642,7 @@ static int macvlan_fill_info(struct sk_buff *skb, const struct 
net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); + struct macvlan_port *port = vlan->port; int i; struct nlattr *nest; @@ -1645,6 +1663,10 @@ static int macvlan_fill_info(struct sk_buff *skb, } nla_nest_end(skb, nest); } + if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used)) + goto nla_put_failure; return 0; nla_put_failure: @@ -1658,6 +1680,8 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED }, [IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 }, + [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 }, + [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT }, }; int macvlan_link_register(struct rtnl_link_ops *ops) @@ -1688,6 +1712,18 @@ static struct rtnl_link_ops macvlan_link_ops = { .priv_size = sizeof(struct macvlan_dev), }; +static void update_port_bc_queue_len(struct macvlan_port *port) +{ + u32 max_bc_queue_len_req = 0; + struct macvlan_dev *vlan; + + list_for_each_entry(vlan, &port->vlans, list) { + if (vlan->bc_queue_len_req > max_bc_queue_len_req) + max_bc_queue_len_req = vlan->bc_queue_len_req; + } + port->bc_queue_len_used = max_bc_queue_len_req; +} + static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c new file mode 100644 index 000000000000..fa41d8c42f05 --- /dev/null +++ b/drivers/net/mhi_net.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* MHI Network driver - Network over MHI bus + * + * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org> + */ + +#include <linux/if_arp.h> +#include <linux/mhi.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/u64_stats_sync.h> + +#define MHI_NET_MIN_MTU ETH_MIN_MTU +#define MHI_NET_MAX_MTU 0xffff +#define MHI_NET_DEFAULT_MTU 0x4000 + +struct mhi_net_stats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t rx_dropped; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + atomic_t rx_queued; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; +}; + +struct mhi_net_dev { + struct mhi_device *mdev; + struct net_device *ndev; + struct delayed_work rx_refill; + struct mhi_net_stats stats; + u32 rx_queue_sz; +}; + +static int mhi_ndo_open(struct net_device *ndev) +{ + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); + + /* Feed the rx buffer pool */ + schedule_delayed_work(&mhi_netdev->rx_refill, 0); + + /* Carrier is established via out-of-band channel (e.g. 
qmi) */ + netif_carrier_on(ndev); + + netif_start_queue(ndev); + + return 0; +} + +static int mhi_ndo_stop(struct net_device *ndev) +{ + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + cancel_delayed_work_sync(&mhi_netdev->rx_refill); + + return 0; +} + +static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); + struct mhi_device *mdev = mhi_netdev->mdev; + int err; + + err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + if (unlikely(err)) { + net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", + ndev->name, err); + + u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); + u64_stats_inc(&mhi_netdev->stats.tx_dropped); + u64_stats_update_end(&mhi_netdev->stats.tx_syncp); + + /* drop the packet */ + dev_kfree_skb_any(skb); + } + + if (mhi_queue_is_full(mdev, DMA_TO_DEVICE)) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; +} + +static void mhi_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp); + stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); + stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); + stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); + stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped); + } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp); + stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets); + stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes); + stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors); + stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped); + } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start)); +} + +static const struct net_device_ops mhi_netdev_ops = { + .ndo_open = mhi_ndo_open, + .ndo_stop = mhi_ndo_stop, + .ndo_start_xmit = mhi_ndo_xmit, + .ndo_get_stats64 = mhi_ndo_get_stats64, +}; + +static void mhi_net_setup(struct net_device *ndev) +{ + ndev->header_ops = NULL; /* No header */ + ndev->type = ARPHRD_NONE; /* QMAP... 
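(multiplexed frames, tagged ETH_P_MAP on receive in the DL callback below)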
*/ + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->flags = IFF_POINTOPOINT | IFF_NOARP; + ndev->netdev_ops = &mhi_netdev_ops; + ndev->mtu = MHI_NET_DEFAULT_MTU; + ndev->min_mtu = MHI_NET_MIN_MTU; + ndev->max_mtu = MHI_NET_MAX_MTU; + ndev->tx_queue_len = 1000; +} + +static void mhi_net_dl_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + int remaining; + + remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued); + + if (unlikely(mhi_res->transaction_status)) { + dev_kfree_skb_any(skb); + + /* MHI layer stopping/resetting the DL channel */ + if (mhi_res->transaction_status == -ENOTCONN) + return; + + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_netdev->stats.rx_errors); + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); + } else { + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_netdev->stats.rx_packets); + u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd); + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); + + skb->protocol = htons(ETH_P_MAP); + skb_put(skb, mhi_res->bytes_xferd); + netif_rx(skb); + } + + /* Refill if RX buffers queue becomes low */ + if (remaining <= mhi_netdev->rx_queue_sz / 2) + schedule_delayed_work(&mhi_netdev->rx_refill, 0); +} + +static void mhi_net_ul_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); + struct net_device *ndev = mhi_netdev->ndev; + struct mhi_device *mdev = mhi_netdev->mdev; + struct sk_buff *skb = mhi_res->buf_addr; + + /* Hardware has consumed the buffer, so free the skb (which is not + * freed by the MHI stack) and perform accounting. + */ + dev_consume_skb_any(skb); + + u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); + if (unlikely(mhi_res->transaction_status)) { + + /* MHI layer stopping/resetting the UL channel */ + if (mhi_res->transaction_status == -ENOTCONN) { + u64_stats_update_end(&mhi_netdev->stats.tx_syncp); + return; + } + + u64_stats_inc(&mhi_netdev->stats.tx_errors); + } else { + u64_stats_inc(&mhi_netdev->stats.tx_packets); + u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd); + } + u64_stats_update_end(&mhi_netdev->stats.tx_syncp); + + if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE)) + netif_wake_queue(ndev); +} + +static void mhi_net_rx_refill_work(struct work_struct *work) +{ + struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev, + rx_refill.work); + struct net_device *ndev = mhi_netdev->ndev; + struct mhi_device *mdev = mhi_netdev->mdev; + int size = READ_ONCE(ndev->mtu); + struct sk_buff *skb; + int err; + + while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) { + skb = netdev_alloc_skb(ndev, size); + if (unlikely(!skb)) + break; + + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT); + if (unlikely(err)) { + net_err_ratelimited("%s: Failed to queue RX buf (%d)\n", + ndev->name, err); + kfree_skb(skb); + break; + } + + atomic_inc(&mhi_netdev->stats.rx_queued); + + /* Do not hog the CPU if rx buffers are consumed faster than + * queued (unlikely). 
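+ * The refill loop also exits early if allocation or queueing fails;
+ * the DL completion callback reschedules this work once the number
+ * of queued buffers falls to half of rx_queue_sz.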
+ */ + cond_resched(); + } + + /* If we're still starved of rx buffers, reschedule later */ + if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued))) + schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); +} + +static int mhi_net_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + const char *netname = (char *)id->driver_data; + struct device *dev = &mhi_dev->dev; + struct mhi_net_dev *mhi_netdev; + struct net_device *ndev; + int err; + + ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE, + mhi_net_setup); + if (!ndev) + return -ENOMEM; + + mhi_netdev = netdev_priv(ndev); + dev_set_drvdata(dev, mhi_netdev); + mhi_netdev->ndev = ndev; + mhi_netdev->mdev = mhi_dev; + SET_NETDEV_DEV(ndev, &mhi_dev->dev); + + /* All MHI net channels have 128 ring elements (at least for now) */ + mhi_netdev->rx_queue_sz = 128; + + INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); + u64_stats_init(&mhi_netdev->stats.rx_syncp); + u64_stats_init(&mhi_netdev->stats.tx_syncp); + + /* Start MHI channels */ + err = mhi_prepare_for_transfer(mhi_dev); + if (err) + goto out_err; + + err = register_netdev(ndev); + if (err) + goto out_err; + + return 0; + +out_err: + free_netdev(ndev); + return err; +} + +static void mhi_net_remove(struct mhi_device *mhi_dev) +{ + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); + + unregister_netdev(mhi_netdev->ndev); + + mhi_unprepare_from_transfer(mhi_netdev->mdev); + + free_netdev(mhi_netdev->ndev); +} + +static const struct mhi_device_id mhi_net_id_table[] = { + { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" }, + { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" }, + {} +}; +MODULE_DEVICE_TABLE(mhi, mhi_net_id_table); + +static struct mhi_driver mhi_net_driver = { + .probe = mhi_net_probe, + .remove = mhi_net_remove, + .dl_xfer_cb = mhi_net_dl_callback, + .ul_xfer_cb = mhi_net_ul_callback, + .id_table = mhi_net_id_table, + .driver = { + .name = "mhi_net", + .owner = THIS_MODULE, + }, +}; + +module_mhi_driver(mhi_net_driver); + +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); +MODULE_DESCRIPTION("Network over MHI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/mii.c b/drivers/net/mii.c index f6a97c859f3a..e71ebb933266 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -84,15 +84,16 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } + + ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); + if (mii->supports_gmii) + ecmd->advertising |= + mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + if (bmcr & BMCR_ANENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; - ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); - if (mii->supports_gmii) - ecmd->advertising |= - mii_ctrl1000_to_ethtool_adv_t(ctrl1000); - if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); ecmd->lp_advertising |= @@ -171,14 +172,15 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii, ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } + + advertising |= mii_get_an(mii, MII_ADVERTISE); + if (mii->supports_gmii) + advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + if (bmcr & BMCR_ANENABLE) { advertising |= ADVERTISED_Autoneg; cmd->base.autoneg = AUTONEG_ENABLE; - advertising |= mii_get_an(mii, MII_ADVERTISE); - if 
(mii->supports_gmii) - advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); - if (bmsr & BMSR_ANEGCOMPLETE) { lp_advertising = mii_get_an(mii, MII_LPA); lp_advertising |= diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index fb182bec8f06..2a4892402ed8 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -697,7 +697,7 @@ static struct failover_ops net_failover_ops = { /** * net_failover_create - Create and register a failover instance * - * @dev: standby netdev + * @standby_dev: standby netdev * * Creates a failover netdev and registers a failover instance for a standby * netdev. Used by paravirtual drivers that use 3-netdev model. diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 92001f7af380..ccecba908ded 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -83,6 +83,7 @@ static struct console netconsole_ext; * whether the corresponding netpoll is active or inactive. * Also, other parameters of a target may be modified at * runtime only when it is disabled (enabled == 0). + * @extended: Denotes whether console is extended or not. * @np: The netpoll structure for this target. * Contains the other userspace visible parameters: * dev_name (read-write) diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index e7972e88ffe0..816af1f55e2c 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -326,6 +326,12 @@ static int nsim_dev_resources_register(struct devlink *devlink) return err; } + /* Resources for nexthops */ + err = devlink_resource_register(devlink, "nexthops", (u64)-1, + NSIM_RESOURCE_NEXTHOPS, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &params); + out: return err; } @@ -760,7 +766,6 @@ static int nsim_dev_flash_update(struct devlink *devlink, return -EOPNOTSUPP; if (nsim_dev->fw_update_status) { - devlink_flash_update_begin_notify(devlink); devlink_flash_update_status_notify(devlink, "Preparing to flash", params->component, 0, 0); @@ -784,7 +789,6 @@ static int nsim_dev_flash_update(struct devlink *devlink, params->component, 81); devlink_flash_update_status_notify(devlink, "Flashing done", params->component, 0, 0); - devlink_flash_update_end_notify(devlink); } return 0; } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index f1884d90a876..166f0d6cbcf7 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -13,9 +13,9 @@ nsim_get_pause_stats(struct net_device *dev, { struct netdevsim *ns = netdev_priv(dev); - if (ns->ethtool.report_stats_rx) + if (ns->ethtool.pauseparam.report_stats_rx) pause_stats->rx_pause_frames = 1; - if (ns->ethtool.report_stats_tx) + if (ns->ethtool.pauseparam.report_stats_tx) pause_stats->tx_pause_frames = 2; } @@ -25,8 +25,8 @@ nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) struct netdevsim *ns = netdev_priv(dev); pause->autoneg = 0; /* We don't support ksettings, so can't pretend */ - pause->rx_pause = ns->ethtool.rx; - pause->tx_pause = ns->ethtool.tx; + pause->rx_pause = ns->ethtool.pauseparam.rx; + pause->tx_pause = ns->ethtool.pauseparam.tx; } static int @@ -37,28 +37,88 @@ nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) if (pause->autoneg) return -EINVAL; - ns->ethtool.rx = pause->rx_pause; - ns->ethtool.tx = pause->tx_pause; + ns->ethtool.pauseparam.rx = pause->rx_pause; + ns->ethtool.pauseparam.tx = pause->tx_pause; + return 0; +} + +static int nsim_get_coalesce(struct net_device *dev, + struct ethtool_coalesce
*coal) +{ + struct netdevsim *ns = netdev_priv(dev); + + memcpy(coal, &ns->ethtool.coalesce, sizeof(ns->ethtool.coalesce)); + return 0; +} + +static int nsim_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct netdevsim *ns = netdev_priv(dev); + + memcpy(&ns->ethtool.coalesce, coal, sizeof(ns->ethtool.coalesce)); + return 0; +} + +static void nsim_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netdevsim *ns = netdev_priv(dev); + + memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring)); +} + +static int nsim_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct netdevsim *ns = netdev_priv(dev); + + memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring)); return 0; } static const struct ethtool_ops nsim_ethtool_ops = { - .get_pause_stats = nsim_get_pause_stats, - .get_pauseparam = nsim_get_pauseparam, - .set_pauseparam = nsim_set_pauseparam, + .supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS, + .get_pause_stats = nsim_get_pause_stats, + .get_pauseparam = nsim_get_pauseparam, + .set_pauseparam = nsim_set_pauseparam, + .set_coalesce = nsim_set_coalesce, + .get_coalesce = nsim_get_coalesce, + .get_ringparam = nsim_get_ringparam, + .set_ringparam = nsim_set_ringparam, }; +static void nsim_ethtool_ring_init(struct netdevsim *ns) +{ + ns->ethtool.ring.rx_max_pending = 4096; + ns->ethtool.ring.rx_jumbo_max_pending = 4096; + ns->ethtool.ring.rx_mini_max_pending = 4096; + ns->ethtool.ring.tx_max_pending = 4096; +} + void nsim_ethtool_init(struct netdevsim *ns) { struct dentry *ethtool, *dir; ns->netdev->ethtool_ops = &nsim_ethtool_ops; + nsim_ethtool_ring_init(ns); + ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir); dir = debugfs_create_dir("pause", ethtool); debugfs_create_bool("report_stats_rx", 0600, dir, - &ns->ethtool.report_stats_rx); + &ns->ethtool.pauseparam.report_stats_rx); debugfs_create_bool("report_stats_tx", 0600, dir, - &ns->ethtool.report_stats_tx); + &ns->ethtool.pauseparam.report_stats_tx); + + dir = debugfs_create_dir("ring", ethtool); + debugfs_create_u32("rx_max_pending", 0600, dir, + &ns->ethtool.ring.rx_max_pending); + debugfs_create_u32("rx_jumbo_max_pending", 0600, dir, + &ns->ethtool.ring.rx_jumbo_max_pending); + debugfs_create_u32("rx_mini_max_pending", 0600, dir, + &ns->ethtool.ring.rx_mini_max_pending); + debugfs_create_u32("tx_max_pending", 0600, dir, + &ns->ethtool.ring.tx_max_pending); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index deea17a0e79c..45d8a7790bd5 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -25,6 +25,7 @@ #include <net/ip6_fib.h> #include <net/fib_rules.h> #include <net/net_namespace.h> +#include <net/nexthop.h> #include "netdevsim.h" @@ -42,9 +43,12 @@ struct nsim_fib_data { struct notifier_block fib_nb; struct nsim_per_fib_data ipv4; struct nsim_per_fib_data ipv6; + struct nsim_fib_entry nexthops; struct rhashtable fib_rt_ht; struct list_head fib_rt_list; spinlock_t fib_lock; /* Protects hashtable, list and accounting */ + struct notifier_block nexthop_nb; + struct rhashtable nexthop_ht; struct devlink *devlink; }; @@ -86,6 +90,19 @@ static const struct rhashtable_params nsim_fib_rt_ht_params = { .automatic_shrinking = true, }; +struct nsim_nexthop { + struct rhash_head ht_node; + u64 occ; + u32 id; +}; + +static const struct rhashtable_params nsim_nexthop_ht_params = { + .key_offset = offsetof(struct nsim_nexthop, id), + .head_offset = offsetof(struct nsim_nexthop, 
ht_node), + .key_len = sizeof(u32), + .automatic_shrinking = true, +}; + u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, enum nsim_resource_id res_id, bool max) { @@ -104,6 +121,9 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, case NSIM_RESOURCE_IPV6_FIB_RULES: entry = &fib_data->ipv6.rules; break; + case NSIM_RESOURCE_NEXTHOPS: + entry = &fib_data->nexthops; + break; default: return 0; } @@ -129,6 +149,9 @@ static void nsim_fib_set_max(struct nsim_fib_data *fib_data, case NSIM_RESOURCE_IPV6_FIB_RULES: entry = &fib_data->ipv6.rules; break; + case NSIM_RESOURCE_NEXTHOPS: + entry = &fib_data->nexthops; + break; default: WARN_ON(1); return; @@ -389,11 +412,6 @@ static int nsim_fib4_event(struct nsim_fib_data *data, fen_info = container_of(info, struct fib_entry_notifier_info, info); - if (fen_info->fi->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported"); - return 0; - } - switch (event) { case FIB_EVENT_ENTRY_REPLACE: err = nsim_fib4_rt_insert(data, fen_info); @@ -704,11 +722,6 @@ static int nsim_fib6_event(struct nsim_fib_data *data, fen6_info = container_of(info, struct fib6_entry_notifier_info, info); - if (fen6_info->rt->nh) { - NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported"); - return 0; - } - if (fen6_info->rt->fib6_src.plen) { NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported"); return 0; @@ -838,6 +851,196 @@ static void nsim_fib_dump_inconsistent(struct notifier_block *nb) data->ipv6.rules.num = 0ULL; } +static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + struct nsim_nexthop *nexthop; + u64 occ = 0; + int i; + + nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL); + if (!nexthop) + return NULL; + + nexthop->id = info->id; + + /* Determine the number of nexthop entries the new nexthop will + * occupy. 
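+ * A plain nexthop occupies a single entry, while a group occupies
+ * the sum of its entries' weights; nsim_nexthop_account() checks the
+ * total against the "nexthops" devlink resource.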
+ */ + + if (!info->is_grp) { + occ = 1; + goto out; + } + + for (i = 0; i < info->nh_grp->num_nh; i++) + occ += info->nh_grp->nh_entries[i].weight; + +out: + nexthop->occ = occ; + return nexthop; +} + +static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop) +{ + kfree(nexthop); +} + +static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ, + bool add, struct netlink_ext_ack *extack) +{ + int err = 0; + + if (add) { + if (data->nexthops.num + occ <= data->nexthops.max) { + data->nexthops.num += occ; + } else { + err = -ENOSPC; + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops"); + } + } else { + if (WARN_ON(occ > data->nexthops.num)) + return -EINVAL; + data->nexthops.num -= occ; + } + + return err; +} + +static int nsim_nexthop_add(struct nsim_fib_data *data, + struct nsim_nexthop *nexthop, + struct netlink_ext_ack *extack) +{ + struct net *net = devlink_net(data->devlink); + int err; + + err = nsim_nexthop_account(data, nexthop->occ, true, extack); + if (err) + return err; + + err = rhashtable_insert_fast(&data->nexthop_ht, &nexthop->ht_node, + nsim_nexthop_ht_params); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to insert nexthop"); + goto err_nexthop_dismiss; + } + + nexthop_set_hw_flags(net, nexthop->id, false, true); + + return 0; + +err_nexthop_dismiss: + nsim_nexthop_account(data, nexthop->occ, false, extack); + return err; +} + +static int nsim_nexthop_replace(struct nsim_fib_data *data, + struct nsim_nexthop *nexthop, + struct nsim_nexthop *nexthop_old, + struct netlink_ext_ack *extack) +{ + struct net *net = devlink_net(data->devlink); + int err; + + err = nsim_nexthop_account(data, nexthop->occ, true, extack); + if (err) + return err; + + err = rhashtable_replace_fast(&data->nexthop_ht, + &nexthop_old->ht_node, &nexthop->ht_node, + nsim_nexthop_ht_params); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to replace nexthop"); + goto err_nexthop_dismiss; + } + + nexthop_set_hw_flags(net, nexthop->id, false, true); + nsim_nexthop_account(data, nexthop_old->occ, false, extack); + nsim_nexthop_destroy(nexthop_old); + + return 0; + +err_nexthop_dismiss: + nsim_nexthop_account(data, nexthop->occ, false, extack); + return err; +} + +static int nsim_nexthop_insert(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + struct nsim_nexthop *nexthop, *nexthop_old; + int err; + + nexthop = nsim_nexthop_create(data, info); + if (!nexthop) + return -ENOMEM; + + nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id, + nsim_nexthop_ht_params); + if (!nexthop_old) + err = nsim_nexthop_add(data, nexthop, info->extack); + else + err = nsim_nexthop_replace(data, nexthop, nexthop_old, + info->extack); + + if (err) + nsim_nexthop_destroy(nexthop); + + return err; +} + +static void nsim_nexthop_remove(struct nsim_fib_data *data, + struct nh_notifier_info *info) +{ + struct nsim_nexthop *nexthop; + + nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &info->id, + nsim_nexthop_ht_params); + if (!nexthop) + return; + + rhashtable_remove_fast(&data->nexthop_ht, &nexthop->ht_node, + nsim_nexthop_ht_params); + nsim_nexthop_account(data, nexthop->occ, false, info->extack); + nsim_nexthop_destroy(nexthop); +} + +static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data, + nexthop_nb); + struct nh_notifier_info *info = ptr; + int err = 0; + + ASSERT_RTNL(); + + switch (event) { + case NEXTHOP_EVENT_REPLACE: + err = 
nsim_nexthop_insert(data, info); + break; + case NEXTHOP_EVENT_DEL: + nsim_nexthop_remove(data, info); + break; + default: + break; + } + + return notifier_from_errno(err); +} + +static void nsim_nexthop_free(void *ptr, void *arg) +{ + struct nsim_nexthop *nexthop = ptr; + struct nsim_fib_data *data = arg; + struct net *net; + + net = devlink_net(data->devlink); + nexthop_set_hw_flags(net, nexthop->id, false, false); + nsim_nexthop_account(data, nexthop->occ, false, NULL); + nsim_nexthop_destroy(nexthop); +} + static u64 nsim_fib_ipv4_resource_occ_get(void *priv) { struct nsim_fib_data *data = priv; @@ -866,12 +1069,20 @@ static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv) return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false); } +static u64 nsim_fib_nexthops_res_occ_get(void *priv) +{ + struct nsim_fib_data *data = priv; + + return nsim_fib_get_val(data, NSIM_RESOURCE_NEXTHOPS, false); +} + static void nsim_fib_set_max_all(struct nsim_fib_data *data, struct devlink *devlink) { enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, - NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES + NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES, + NSIM_RESOURCE_NEXTHOPS, }; int i; @@ -897,20 +1108,32 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, return ERR_PTR(-ENOMEM); data->devlink = devlink; + err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params); + if (err) + goto err_data_free; + spin_lock_init(&data->fib_lock); INIT_LIST_HEAD(&data->fib_rt_list); err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params); if (err) - goto err_data_free; + goto err_rhashtable_nexthop_destroy; nsim_fib_set_max_all(data, devlink); + data->nexthop_nb.notifier_call = nsim_nexthop_event_nb; + err = register_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb, + extack); + if (err) { + pr_err("Failed to register nexthop notifier\n"); + goto err_rhashtable_fib_destroy; + } + data->fib_nb.notifier_call = nsim_fib_event_nb; err = register_fib_notifier(devlink_net(devlink), &data->fib_nb, nsim_fib_dump_inconsistent, extack); if (err) { pr_err("Failed to register fib notifier\n"); - goto err_rhashtable_destroy; + goto err_nexthop_nb_unregister; } devlink_resource_occ_get_register(devlink, @@ -929,11 +1152,20 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_fib_ipv6_rules_res_occ_get, data); + devlink_resource_occ_get_register(devlink, + NSIM_RESOURCE_NEXTHOPS, + nsim_fib_nexthops_res_occ_get, + data); return data; -err_rhashtable_destroy: +err_nexthop_nb_unregister: + unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); +err_rhashtable_fib_destroy: rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); +err_rhashtable_nexthop_destroy: + rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free, + data); err_data_free: kfree(data); return ERR_PTR(err); @@ -942,6 +1174,8 @@ err_data_free: void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) { devlink_resource_occ_get_unregister(devlink, + NSIM_RESOURCE_NEXTHOPS); + devlink_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV6_FIB_RULES); devlink_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV6_FIB); @@ -950,8 +1184,11 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data) devlink_resource_occ_get_unregister(devlink, NSIM_RESOURCE_IPV4_FIB); unregister_fib_notifier(devlink_net(devlink), &data->fib_nb); + 
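/* Notifiers are unregistered in reverse order of registration */ +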
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb); rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free, data); + rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free, + data); WARN_ON_ONCE(!list_empty(&data->fib_rt_list)); kfree(data); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index c4e7ad2a1964..48163c5f2ec9 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -15,6 +15,7 @@ #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/netdevice.h> @@ -51,13 +52,19 @@ struct nsim_ipsec { u32 ok; }; -struct nsim_ethtool { +struct nsim_ethtool_pauseparam { bool rx; bool tx; bool report_stats_rx; bool report_stats_tx; }; +struct nsim_ethtool { + struct nsim_ethtool_pauseparam pauseparam; + struct ethtool_coalesce coalesce; + struct ethtool_ringparam ring; +}; + struct netdevsim { struct net_device *netdev; struct nsim_dev *nsim_dev; @@ -158,6 +165,7 @@ enum nsim_resource_id { NSIM_RESOURCE_IPV6, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES, + NSIM_RESOURCE_NEXTHOPS, }; struct nsim_dev_health { diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index afb119f38325..5e19a6839dea 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index 307f0ac1287b..55a0b91816e2 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -8,6 +8,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/errno.h> +#include <linux/ethtool_netlink.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mii.h> @@ -23,6 +24,7 @@ #define ADIN1300_PHY_CTRL1 0x0012 #define ADIN1300_AUTO_MDI_EN BIT(10) #define ADIN1300_MAN_MDIX_EN BIT(9) +#define ADIN1300_DIAG_CLK_EN BIT(2) #define ADIN1300_RX_ERR_CNT 0x0014 @@ -69,6 +71,31 @@ #define ADIN1300_CLOCK_STOP_REG 0x9400 #define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000 +#define ADIN1300_CDIAG_RUN 0xba1b +#define ADIN1300_CDIAG_RUN_EN BIT(0) + +/* + * The XSIM3/2/1 and XSHRT3/2/1 are actually relative. 
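+ * That is, each pair's result register reports cross results against
+ * the three other pairs: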
+ * For CDIAG_DTLD_RSLTS(0) it's ADIN1300_CDIAG_RSLT_XSIM3/2/1 + * For CDIAG_DTLD_RSLTS(1) it's ADIN1300_CDIAG_RSLT_XSIM3/2/0 + * For CDIAG_DTLD_RSLTS(2) it's ADIN1300_CDIAG_RSLT_XSIM3/1/0 + * For CDIAG_DTLD_RSLTS(3) it's ADIN1300_CDIAG_RSLT_XSIM2/1/0 + */ +#define ADIN1300_CDIAG_DTLD_RSLTS(x) (0xba1d + (x)) +#define ADIN1300_CDIAG_RSLT_BUSY BIT(10) +#define ADIN1300_CDIAG_RSLT_XSIM3 BIT(9) +#define ADIN1300_CDIAG_RSLT_XSIM2 BIT(8) +#define ADIN1300_CDIAG_RSLT_XSIM1 BIT(7) +#define ADIN1300_CDIAG_RSLT_SIM BIT(6) +#define ADIN1300_CDIAG_RSLT_XSHRT3 BIT(5) +#define ADIN1300_CDIAG_RSLT_XSHRT2 BIT(4) +#define ADIN1300_CDIAG_RSLT_XSHRT1 BIT(3) +#define ADIN1300_CDIAG_RSLT_SHRT BIT(2) +#define ADIN1300_CDIAG_RSLT_OPEN BIT(1) +#define ADIN1300_CDIAG_RSLT_GOOD BIT(0) + +#define ADIN1300_CDIAG_FLT_DIST(x) (0xba21 + (x)) + #define ADIN1300_GE_SOFT_RESET_REG 0xff0c #define ADIN1300_GE_SOFT_RESET BIT(0) @@ -321,10 +348,9 @@ static int adin_set_downshift(struct phy_device *phydev, u8 cnt) return -E2BIG; val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt); - val |= ADIN1300_LINKING_EN; rc = phy_modify(phydev, ADIN1300_PHY_CTRL3, - ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK, + ADIN1300_DOWNSPEED_RETRIES_MSK, val); if (rc < 0) return rc; @@ -445,12 +471,43 @@ static int adin_phy_ack_intr(struct phy_device *phydev) static int adin_phy_config_intr(struct phy_device *phydev) { - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - return phy_set_bits(phydev, ADIN1300_INT_MASK_REG, - ADIN1300_INT_MASK_EN); + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = adin_phy_ack_intr(phydev); + if (err) + return err; + + err = phy_set_bits(phydev, ADIN1300_INT_MASK_REG, + ADIN1300_INT_MASK_EN); + } else { + err = phy_clear_bits(phydev, ADIN1300_INT_MASK_REG, + ADIN1300_INT_MASK_EN); + if (err) + return err; + + err = adin_phy_ack_intr(phydev); + } + + return err; +} + +static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, ADIN1300_INT_STATUS_REG); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } - return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG, - ADIN1300_INT_MASK_EN); + if (!(irq_status & ADIN1300_INT_LINK_STAT_CHNG_EN)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad, @@ -555,6 +612,14 @@ static int adin_config_aneg(struct phy_device *phydev) { int ret; + ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN); + if (ret < 0) + return ret; + + ret = phy_set_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN); + if (ret < 0) + return ret; + ret = adin_config_mdix(phydev); if (ret) return ret; @@ -725,10 +790,117 @@ static int adin_probe(struct phy_device *phydev) return 0; } +static int adin_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN); + if (ret < 0) + return ret; + + ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN); + if (ret < 0) + return ret; + + /* wait a bit for the clock to stabilize */ + msleep(50); + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN, + ADIN1300_CDIAG_RUN_EN); +} + +static int adin_cable_test_report_trans(int result) +{ + int mask; + + if (result & ADIN1300_CDIAG_RSLT_GOOD) + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + if (result & ADIN1300_CDIAG_RSLT_OPEN) + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + + /* short with other pairs 
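(any XSHRT bit set), mapped to ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT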
*/ + mask = ADIN1300_CDIAG_RSLT_XSHRT3 | + ADIN1300_CDIAG_RSLT_XSHRT2 | + ADIN1300_CDIAG_RSLT_XSHRT1; + if (result & mask) + return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT; + + if (result & ADIN1300_CDIAG_RSLT_SHRT) + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; +} + +static int adin_cable_test_report_pair(struct phy_device *phydev, + unsigned int pair) +{ + int fault_rslt; + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_CDIAG_DTLD_RSLTS(pair)); + if (ret < 0) + return ret; + + fault_rslt = adin_cable_test_report_trans(ret); + + ret = ethnl_cable_test_result(phydev, pair, fault_rslt); + if (ret < 0) + return ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, + ADIN1300_CDIAG_FLT_DIST(pair)); + if (ret < 0) + return ret; + + switch (fault_rslt) { + case ETHTOOL_A_CABLE_RESULT_CODE_OPEN: + case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT: + case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT: + return ethnl_cable_test_fault_length(phydev, pair, ret * 100); + default: + return 0; + } +} + +static int adin_cable_test_report(struct phy_device *phydev) +{ + unsigned int pair; + int ret; + + for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) { + ret = adin_cable_test_report_pair(phydev, pair); + if (ret < 0) + return ret; + } + + return 0; +} + +static int adin_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret; + + *finished = false; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN); + if (ret < 0) + return ret; + + if (ret & ADIN1300_CDIAG_RUN_EN) + return 0; + + *finished = true; + + return adin_cable_test_report(phydev); +} + static struct phy_driver adin_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200), .name = "ADIN1200", + .flags = PHY_POLL_CABLE_TEST, .probe = adin_probe, .config_init = adin_config_init, .soft_reset = adin_soft_reset, @@ -736,8 +908,8 @@ static struct phy_driver adin_driver[] = { .read_status = adin_read_status, .get_tunable = adin_get_tunable, .set_tunable = adin_set_tunable, - .ack_interrupt = adin_phy_ack_intr, .config_intr = adin_phy_config_intr, + .handle_interrupt = adin_phy_handle_interrupt, .get_sset_count = adin_get_sset_count, .get_strings = adin_get_strings, .get_stats = adin_get_stats, @@ -745,10 +917,13 @@ static struct phy_driver adin_driver[] = { .suspend = genphy_suspend, .read_mmd = adin_read_mmd, .write_mmd = adin_write_mmd, + .cable_test_start = adin_cable_test_start, + .cable_test_get_status = adin_cable_test_get_status, }, { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300), .name = "ADIN1300", + .flags = PHY_POLL_CABLE_TEST, .probe = adin_probe, .config_init = adin_config_init, .soft_reset = adin_soft_reset, @@ -756,8 +931,8 @@ static struct phy_driver adin_driver[] = { .read_status = adin_read_status, .get_tunable = adin_get_tunable, .set_tunable = adin_set_tunable, - .ack_interrupt = adin_phy_ack_intr, .config_intr = adin_phy_config_intr, + .handle_interrupt = adin_phy_handle_interrupt, .get_sset_count = adin_get_sset_count, .get_strings = adin_get_strings, .get_stats = adin_get_stats, @@ -765,6 +940,8 @@ static struct phy_driver adin_driver[] = { .suspend = genphy_suspend, .read_mmd = adin_read_mmd, .write_mmd = adin_write_mmd, + .cable_test_start = adin_cable_test_start, + .cable_test_get_status = adin_cable_test_get_status, }, }; diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index eef35f8c8d45..001bb6d8bfce 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -20,6 +20,10 @@ #define MII_AM79C_IR_EN_ANEG 
0x0100 /* IR enable Aneg Complete */ #define MII_AM79C_IR_IMASK_INIT (MII_AM79C_IR_EN_LINK | MII_AM79C_IR_EN_ANEG) +#define MII_AM79C_IR_LINK_DOWN BIT(2) +#define MII_AM79C_IR_ANEG_DONE BIT(0) +#define MII_AM79C_IR_IMASK_STAT (MII_AM79C_IR_LINK_DOWN | MII_AM79C_IR_ANEG_DONE) + MODULE_DESCRIPTION("AMD PHY driver"); MODULE_AUTHOR("Heiko Schocher <hs@denx.de>"); MODULE_LICENSE("GPL"); @@ -48,22 +52,49 @@ static int am79c_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = am79c_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, MII_AM79C_IR, MII_AM79C_IR_IMASK_INIT); - else + } else { err = phy_write(phydev, MII_AM79C_IR, 0); + if (err) + return err; + + err = am79c_ack_interrupt(phydev); + } return err; } +static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_AM79C_IR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_AM79C_IR_IMASK_STAT)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static struct phy_driver am79c_driver[] = { { .phy_id = PHY_ID_AM79C874, .name = "AM79C874", .phy_id_mask = 0xfffffff0, /* PHY_BASIC_FEATURES */ .config_init = am79c_config_init, - .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, + .handle_interrupt = am79c_handle_interrupt, } }; module_phy_driver(am79c_driver); diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index 41e7c1432497..968dd43a2b1e 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -52,6 +52,7 @@ #define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1) #define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01 +#define MDIO_AN_TX_VEND_INT_STATUS2_MASK BIT(0) #define MDIO_AN_TX_VEND_INT_MASK2 0xd401 #define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0) @@ -246,6 +247,13 @@ static int aqr_config_intr(struct phy_device *phydev) bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED; int err; + if (en) { + /* Clear any pending interrupts before enabling them */ + err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2); + if (err < 0) + return err; + } + err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2, en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0); if (err < 0) @@ -256,18 +264,39 @@ static int aqr_config_intr(struct phy_device *phydev) if (err < 0) return err; - return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK, - en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 | - VEND1_GLOBAL_INT_VEND_MASK_AN : 0); + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK, + en ? 
VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 | + VEND1_GLOBAL_INT_VEND_MASK_AN : 0); + if (err < 0) + return err; + + if (!en) { + /* Clear any pending interrupts after we have disabled them */ + err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2); + if (err < 0) + return err; + } + + return 0; } -static int aqr_ack_interrupt(struct phy_device *phydev) +static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev) { - int reg; + int irq_status; + + irq_status = phy_read_mmd(phydev, MDIO_MMD_AN, + MDIO_AN_TX_VEND_INT_STATUS2); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); - reg = phy_read_mmd(phydev, MDIO_MMD_AN, - MDIO_AN_TX_VEND_INT_STATUS2); - return (reg < 0) ? reg : 0; + return IRQ_HANDLED; } static int aqr_read_status(struct phy_device *phydev) @@ -584,7 +613,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQ1202", .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr_read_status, }, { @@ -592,7 +621,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQ2104", .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr_read_status, }, { @@ -600,7 +629,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR105", .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr_read_status, .suspend = aqr107_suspend, .resume = aqr107_resume, @@ -610,7 +639,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR106", .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr_read_status, }, { @@ -620,7 +649,7 @@ static struct phy_driver aqr_driver[] = { .config_init = aqr107_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr107_read_status, .get_tunable = aqr107_get_tunable, .set_tunable = aqr107_set_tunable, @@ -638,7 +667,7 @@ static struct phy_driver aqr_driver[] = { .config_init = aqcs109_config_init, .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr107_read_status, .get_tunable = aqr107_get_tunable, .set_tunable = aqr107_set_tunable, @@ -654,7 +683,7 @@ static struct phy_driver aqr_driver[] = { .name = "Aquantia AQR405", .config_aneg = aqr_config_aneg, .config_intr = aqr_config_intr, - .ack_interrupt = aqr_ack_interrupt, + .handle_interrupt = aqr_handle_interrupt, .read_status = aqr_read_status, }, }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index ed601a7e46a0..d0b36fd6c265 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -614,6 +614,11 @@ static int at803x_config_intr(struct phy_device *phydev) value = phy_read(phydev, AT803X_INTR_ENABLE); if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Clear any pending interrupts */ + err = at803x_ack_interrupt(phydev); + if (err) + return err; + value |= AT803X_INTR_ENABLE_AUTONEG_ERR; value |= AT803X_INTR_ENABLE_SPEED_CHANGED; value |= 
AT803X_INTR_ENABLE_DUPLEX_CHANGED; @@ -621,13 +626,44 @@ static int at803x_config_intr(struct phy_device *phydev) value |= AT803X_INTR_ENABLE_LINK_SUCCESS; err = phy_write(phydev, AT803X_INTR_ENABLE, value); - } - else + } else { err = phy_write(phydev, AT803X_INTR_ENABLE, 0); + if (err) + return err; + + /* Clear any pending interrupts */ + err = at803x_ack_interrupt(phydev); + } return err; } +static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, int_enabled; + + irq_status = phy_read(phydev, AT803X_INTR_STATUS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + /* Read the current enabled interrupts */ + int_enabled = phy_read(phydev, AT803X_INTR_ENABLE); + if (int_enabled < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + /* See if this was one of our enabled interrupts */ + if (!(irq_status & int_enabled)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static void at803x_link_change_notify(struct phy_device *phydev) { /* @@ -1062,8 +1098,8 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, /* PHY_GBIT_FEATURES */ .read_status = at803x_read_status, - .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, .cable_test_start = at803x_cable_test_start, @@ -1082,8 +1118,8 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, /* PHY_BASIC_FEATURES */ - .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, }, { /* Qualcomm Atheros AR8031/AR8033 */ PHY_ID_MATCH_EXACT(ATH8031_PHY_ID), @@ -1100,8 +1136,8 @@ static struct phy_driver at803x_driver[] = { /* PHY_GBIT_FEATURES */ .read_status = at803x_read_status, .aneg_done = at803x_aneg_done, - .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, .cable_test_start = at803x_cable_test_start, @@ -1120,8 +1156,8 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, /* PHY_BASIC_FEATURES */ - .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, .cable_test_start = at803x_cable_test_start, .cable_test_get_status = at803x_cable_test_get_status, }, { @@ -1132,8 +1168,8 @@ static struct phy_driver at803x_driver[] = { .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, /* PHY_BASIC_FEATURES */ - .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, + .handle_interrupt = at803x_handle_interrupt, .cable_test_start = at803x_cable_test_start, .cable_test_get_status = at803x_cable_test_get_status, .read_status = at803x_read_status, diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 9ccf28b0a04d..da8f7cb41b44 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -256,8 +256,8 @@ static struct phy_driver bcm_cygnus_phy_driver[] = { .name = "Broadcom Cygnus PHY", /* PHY_GBIT_FEATURES */ .config_init = bcm_cygnus_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = bcm_cygnus_resume, }, { diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c 
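A second invariant shows up in every .config_intr() in this series: on enable, ack any stale latched status before unmasking, so a leftover event cannot fire the moment interrupts are switched on; on disable, mask first and ack afterwards, so nothing latched in between survives. Condensed into a sketch (example_ack() and the MII_EXAMPLE_* names are stand-ins, not a real device):

#include <linux/bits.h>
#include <linux/phy.h>

#define MII_EXAMPLE_ISR		0x13	/* hypothetical: read-to-clear status */
#define MII_EXAMPLE_IER		0x14	/* hypothetical: interrupt enables */
#define MII_EXAMPLE_IER_EN	BIT(1)

/* Reading the status register clears it. */
static int example_ack(struct phy_device *phydev)
{
	int err = phy_read(phydev, MII_EXAMPLE_ISR);

	return (err < 0) ? err : 0;
}

static int example_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* Ack stale events, then unmask. */
		err = example_ack(phydev);
		if (err)
			return err;

		err = phy_write(phydev, MII_EXAMPLE_IER, MII_EXAMPLE_IER_EN);
	} else {
		/* Mask first, then ack whatever latched meanwhile. */
		err = phy_write(phydev, MII_EXAMPLE_IER, 0);
		if (err)
			return err;

		err = example_ack(phydev);
	}

	return err;
}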
index ef6825b30323..53282a6d5928 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -181,21 +181,62 @@ EXPORT_SYMBOL_GPL(bcm_phy_ack_intr); int bcm_phy_config_intr(struct phy_device *phydev) { - int reg; + int reg, err; reg = phy_read(phydev, MII_BCM54XX_ECR); if (reg < 0) return reg; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = bcm_phy_ack_intr(phydev); + if (err) + return err; + reg &= ~MII_BCM54XX_ECR_IM; - else + err = phy_write(phydev, MII_BCM54XX_ECR, reg); + } else { reg |= MII_BCM54XX_ECR_IM; + err = phy_write(phydev, MII_BCM54XX_ECR, reg); + if (err) + return err; - return phy_write(phydev, MII_BCM54XX_ECR, reg); + err = bcm_phy_ack_intr(phydev); + } + return err; } EXPORT_SYMBOL_GPL(bcm_phy_config_intr); +irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, irq_mask; + + irq_status = phy_read(phydev, MII_BCM54XX_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + /* If a bit from the Interrupt Mask register is set, the corresponding + * bit from the Interrupt Status register is masked. So read the IMR + * and then flip the bits to get the list of possible interrupt + * sources. + */ + irq_mask = phy_read(phydev, MII_BCM54XX_IMR); + if (irq_mask < 0) { + phy_error(phydev); + return IRQ_NONE; + } + irq_mask = ~irq_mask; + + if (!(irq_status & irq_mask)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt); + int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow) { phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 237a8503c9b4..c3842f87c33b 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -63,6 +63,7 @@ int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, int bcm_phy_ack_intr(struct phy_device *phydev); int bcm_phy_config_intr(struct phy_device *phydev); +irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev); int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down); diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c index 8998e68bb26b..d8f3024860dc 100644 --- a/drivers/net/phy/bcm54140.c +++ b/drivers/net/phy/bcm54140.c @@ -637,13 +637,29 @@ static int bcm54140_config_init(struct phy_device *phydev) BCM54140_RDB_C_PWR_ISOLATE, 0); } -static int bcm54140_did_interrupt(struct phy_device *phydev) +static irqreturn_t bcm54140_handle_interrupt(struct phy_device *phydev) { - int ret; + int irq_status, irq_mask; + + irq_status = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_mask = bcm_phy_read_rdb(phydev, BCM54140_RDB_IMR); + if (irq_mask < 0) { + phy_error(phydev); + return IRQ_NONE; + } + irq_mask = ~irq_mask; + + if (!(irq_status & irq_mask)) + return IRQ_NONE; - ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR); + phy_trigger_machine(phydev); - return (ret < 0) ? 
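bcm_phy_handle_interrupt() above deviates slightly because of a hardware convention: in the Broadcom Interrupt Mask Register a set bit means the source is masked, so the enabled sources are the complement of IMR. Reduced to a sketch with hypothetical register numbers:

#include <linux/phy.h>

#define MII_EXAMPLE_ISR	0x1a	/* hypothetical latched status */
#define MII_EXAMPLE_IMR	0x1b	/* hypothetical mask; 1 = masked */

static irqreturn_t example_handle_interrupt(struct phy_device *phydev)
{
	int irq_status, irq_mask;

	irq_status = phy_read(phydev, MII_EXAMPLE_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	irq_mask = phy_read(phydev, MII_EXAMPLE_IMR);
	if (irq_mask < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	/* Flip the mask register to get the enabled sources. */
	if (!(irq_status & ~irq_mask))
		return IRQ_NONE;

	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}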
0 : ret; + return IRQ_HANDLED; } static int bcm54140_ack_intr(struct phy_device *phydev) @@ -665,7 +681,7 @@ static int bcm54140_config_intr(struct phy_device *phydev) BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1, BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3, }; - int reg; + int reg, err; if (priv->port >= ARRAY_SIZE(port_to_imr_bit)) return -EINVAL; @@ -674,12 +690,23 @@ static int bcm54140_config_intr(struct phy_device *phydev) if (reg < 0) return reg; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = bcm54140_ack_intr(phydev); + if (err) + return err; + reg &= ~port_to_imr_bit[priv->port]; - else + err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg); + } else { reg |= port_to_imr_bit[priv->port]; + err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg); + if (err) + return err; + + err = bcm54140_ack_intr(phydev); + } - return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg); + return err; } static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data) @@ -834,8 +861,7 @@ static struct phy_driver bcm54140_drivers[] = { .flags = PHY_POLL_CABLE_TEST, .features = PHY_GBIT_FEATURES, .config_init = bcm54140_config_init, - .did_interrupt = bcm54140_did_interrupt, - .ack_interrupt = bcm54140_ack_intr, + .handle_interrupt = bcm54140_handle_interrupt, .config_intr = bcm54140_config_intr, .probe = bcm54140_probe, .suspend = genphy_suspend, diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index 459fb2069c7e..0eb33be824f1 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -25,12 +25,22 @@ static int bcm63xx_config_intr(struct phy_device *phydev) if (reg < 0) return reg; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = bcm_phy_ack_intr(phydev); + if (err) + return err; + reg &= ~MII_BCM63XX_IR_GMASK; - else + err = phy_write(phydev, MII_BCM63XX_IR, reg); + } else { reg |= MII_BCM63XX_IR_GMASK; + err = phy_write(phydev, MII_BCM63XX_IR, reg); + if (err) + return err; + + err = bcm_phy_ack_intr(phydev); + } - err = phy_write(phydev, MII_BCM63XX_IR, reg); return err; } @@ -67,8 +77,8 @@ static struct phy_driver bcm63xx_driver[] = { /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { /* same phy as above, with just a different OUI */ .phy_id = 0x002bdc00, @@ -77,8 +87,8 @@ static struct phy_driver bcm63xx_driver[] = { /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, } }; module_phy_driver(bcm63xx_driver); diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index df360e1c5069..4ac8fd190e9d 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -144,35 +144,41 @@ static int bcm87xx_config_intr(struct phy_device *phydev) if (reg < 0) return reg; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_read(phydev, BCM87XX_LASI_STATUS); + if (err) + return err; + reg |= 1; - else + err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + } else { reg &= ~1; + err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); + if (err) + return err; + + err = phy_read(phydev, BCM87XX_LASI_STATUS); 
+ } - err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg); return err; } -static int bcm87xx_did_interrupt(struct phy_device *phydev) +static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev) { - int reg; + int irq_status; - reg = phy_read(phydev, BCM87XX_LASI_STATUS); - - if (reg < 0) { - phydev_err(phydev, - "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", - reg); - return 0; + irq_status = phy_read(phydev, BCM87XX_LASI_STATUS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; } - return (reg & 1) != 0; -} -static int bcm87xx_ack_interrupt(struct phy_device *phydev) -{ - /* Reading the LASI status clears it. */ - bcm87xx_did_interrupt(phydev); - return 0; + if (irq_status == 0) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int bcm8706_match_phy_device(struct phy_device *phydev) @@ -194,9 +200,8 @@ static struct phy_driver bcm87xx_driver[] = { .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, - .ack_interrupt = bcm87xx_ack_interrupt, .config_intr = bcm87xx_config_intr, - .did_interrupt = bcm87xx_did_interrupt, + .handle_interrupt = bcm87xx_handle_interrupt, .match_phy_device = bcm8706_match_phy_device, }, { .phy_id = PHY_ID_BCM8727, @@ -206,9 +211,8 @@ static struct phy_driver bcm87xx_driver[] = { .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, - .ack_interrupt = bcm87xx_ack_interrupt, .config_intr = bcm87xx_config_intr, - .did_interrupt = bcm87xx_did_interrupt, + .handle_interrupt = bcm87xx_handle_interrupt, .match_phy_device = bcm8727_match_phy_device, } }; diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index cd271de9609b..8a4ec3222168 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -634,15 +634,43 @@ static int brcm_fet_config_intr(struct phy_device *phydev) if (reg < 0) return reg; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = brcm_fet_ack_interrupt(phydev); + if (err) + return err; + reg &= ~MII_BRCM_FET_IR_MASK; - else + err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); + } else { reg |= MII_BRCM_FET_IR_MASK; + err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); + if (err) + return err; + + err = brcm_fet_ack_interrupt(phydev); + } - err = phy_write(phydev, MII_BRCM_FET_INTREG, reg); return err; } +static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_BRCM_FET_INTREG); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (irq_status == 0) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + struct bcm53xx_phy_priv { u64 *stats; }; @@ -681,40 +709,40 @@ static struct phy_driver broadcom_drivers[] = { .name = "Broadcom BCM5411", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM54210E, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54210E", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, 
.config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM54612E, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54612E", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, @@ -722,8 +750,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, .read_status = bcm54616s_read_status, .probe = bcm54616s_probe, }, { @@ -732,8 +760,8 @@ static struct phy_driver broadcom_drivers[] = { .name = "Broadcom BCM5464", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -743,8 +771,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM54810, .phy_id_mask = 0xfffffff0, @@ -752,8 +780,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = bcm54xx_resume, }, { @@ -763,8 +791,8 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .config_init = bcm54811_config_init, .config_aneg = bcm5481_config_aneg, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = bcm54xx_resume, }, { @@ -774,48 +802,48 @@ static struct phy_driver broadcom_drivers[] = { /* PHY_GBIT_FEATURES */ .config_init = bcm5482_config_init, .read_status = bcm5482_read_status, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 
0xfffffff0, .name = "Broadcom BCMAC131", /* PHY_BASIC_FEATURES */ .config_init = brcm_fet_config_init, - .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, + .handle_interrupt = brcm_fet_handle_interrupt, }, { .phy_id = PHY_ID_BCM5241, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5241", /* PHY_BASIC_FEATURES */ .config_init = brcm_fet_config_init, - .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, + .handle_interrupt = brcm_fet_handle_interrupt, }, { .phy_id = PHY_ID_BCM5395, .phy_id_mask = 0xfffffff0, @@ -837,16 +865,16 @@ static struct phy_driver broadcom_drivers[] = { .get_stats = bcm53xx_phy_get_stats, .probe = bcm53xx_phy_probe, .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, }, { .phy_id = PHY_ID_BCM89610, .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM89610", /* PHY_GBIT_FEATURES */ .config_init = bcm54xx_config_init, - .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, + .handle_interrupt = bcm_phy_handle_interrupt, } }; module_phy_driver(broadcom_drivers); diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index 9d1612a4d7e6..ef5f412e101f 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -87,15 +87,42 @@ static int cis820x_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = cis820x_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); - else + } else { err = phy_write(phydev, MII_CIS8201_IMASK, 0); + if (err) + return err; + + err = cis820x_ack_interrupt(phydev); + } return err; } +static irqreturn_t cis820x_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_CIS8201_ISTAT); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_CIS8201_IMASK_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + /* Cicada 8201, a.k.a Vitesse VSC8201 */ static struct phy_driver cis820x_driver[] = { { @@ -104,16 +131,16 @@ static struct phy_driver cis820x_driver[] = { .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = &cis820x_config_init, - .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, + .handle_interrupt = &cis820x_handle_interrupt, }, { .phy_id = 0x000fc440, .name = "Cicada Cis8204", .phy_id_mask = 0x000fffc0, /* PHY_GBIT_FEATURES */ .config_init = &cis820x_config_init, - .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, + .handle_interrupt = &cis820x_handle_interrupt, } }; module_phy_driver(cis820x_driver); diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 942f277463a4..a3b3842c67e5 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -47,6 +47,10 @@ #define MII_DM9161_INTR_STOP \ (MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) +#define MII_DM9161_INTR_CHANGE \ + (MII_DM9161_INTR_DPLX_CHANGE | \ + MII_DM9161_INTR_SPD_CHANGE | \ + MII_DM9161_INTR_LINK_CHANGE) /* DM9161 10BT Configuration/Status */ #define MII_DM9161_10BTCSR 0x12 @@ -57,24 +61,58 @@ MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); +static int dm9161_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, 
MII_DM9161_INTR); + + return (err < 0) ? err : 0; +} + #define DM9161_DELAY 1 static int dm9161_config_intr(struct phy_device *phydev) { - int temp; + int temp, err; temp = phy_read(phydev, MII_DM9161_INTR); if (temp < 0) return temp; - if (PHY_INTERRUPT_ENABLED == phydev->interrupts) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = dm9161_ack_interrupt(phydev); + if (err) + return err; + temp &= ~(MII_DM9161_INTR_STOP); - else + err = phy_write(phydev, MII_DM9161_INTR, temp); + } else { temp |= MII_DM9161_INTR_STOP; + err = phy_write(phydev, MII_DM9161_INTR, temp); + if (err) + return err; + + err = dm9161_ack_interrupt(phydev); + } + + return err; +} - temp = phy_write(phydev, MII_DM9161_INTR, temp); +static irqreturn_t dm9161_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_DM9161_INTR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } - return temp; + if (!(irq_status & MII_DM9161_INTR_CHANGE)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int dm9161_config_aneg(struct phy_device *phydev) @@ -132,13 +170,6 @@ static int dm9161_config_init(struct phy_device *phydev) return phy_write(phydev, MII_BMCR, BMCR_ANENABLE); } -static int dm9161_ack_interrupt(struct phy_device *phydev) -{ - int err = phy_read(phydev, MII_DM9161_INTR); - - return (err < 0) ? err : 0; -} - static struct phy_driver dm91xx_driver[] = { { .phy_id = 0x0181b880, @@ -147,8 +178,8 @@ static struct phy_driver dm91xx_driver[] = { /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, + .handle_interrupt = dm9161_handle_interrupt, }, { .phy_id = 0x0181b8b0, .name = "Davicom DM9161B/C", @@ -156,8 +187,8 @@ static struct phy_driver dm91xx_driver[] = { /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, + .handle_interrupt = dm9161_handle_interrupt, }, { .phy_id = 0x0181b8a0, .name = "Davicom DM9161A", @@ -165,15 +196,15 @@ static struct phy_driver dm91xx_driver[] = { /* PHY_BASIC_FEATURES */ .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, - .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, + .handle_interrupt = dm9161_handle_interrupt, }, { .phy_id = 0x00181b80, .name = "Davicom DM9131", .phy_id_mask = 0x0ffffff0, /* PHY_BASIC_FEATURES */ - .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, + .handle_interrupt = dm9161_handle_interrupt, } }; module_phy_driver(dm91xx_driver); diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index f2caccaf4408..0d79f68f301c 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -50,6 +50,14 @@ #define MII_DP83640_MISR_LINK_INT_EN 0x20 #define MII_DP83640_MISR_ED_INT_EN 0x40 #define MII_DP83640_MISR_LQ_INT_EN 0x80 +#define MII_DP83640_MISR_ANC_INT 0x400 +#define MII_DP83640_MISR_DUP_INT 0x800 +#define MII_DP83640_MISR_SPD_INT 0x1000 +#define MII_DP83640_MISR_LINK_INT 0x2000 +#define MII_DP83640_MISR_INT_MASK (MII_DP83640_MISR_ANC_INT |\ + MII_DP83640_MISR_DUP_INT |\ + MII_DP83640_MISR_SPD_INT |\ + MII_DP83640_MISR_LINK_INT) /* phyter seems to miss the mark by 16 ns */ #define ADJTIME_FIX 16 @@ -964,15 +972,12 @@ static void decode_status_frame(struct dp83640_private *dp83640, static int is_sync(struct sk_buff *skb, int type) { struct 
ptp_header *hdr; - u8 msgtype; hdr = ptp_parse_header(skb, type); if (!hdr) return 0; - msgtype = ptp_get_msgtype(hdr, type); - - return (msgtype & 0xf) == 0; + return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC; } static void dp83640_free_clocks(void) @@ -1151,6 +1156,10 @@ static int dp83640_config_intr(struct phy_device *phydev) int err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = dp83640_ack_interrupt(phydev); + if (err) + return err; + misr = phy_read(phydev, MII_DP83640_MISR); if (misr < 0) return misr; @@ -1189,10 +1198,32 @@ static int dp83640_config_intr(struct phy_device *phydev) MII_DP83640_MISR_DUP_INT_EN | MII_DP83640_MISR_SPD_INT_EN | MII_DP83640_MISR_LINK_INT_EN); - return phy_write(phydev, MII_DP83640_MISR, misr); + err = phy_write(phydev, MII_DP83640_MISR, misr); + if (err) + return err; + + return dp83640_ack_interrupt(phydev); } } +static irqreturn_t dp83640_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_DP83640_MISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_DP83640_MISR_INT_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) { struct dp83640_private *dp83640 = @@ -1515,8 +1546,8 @@ static struct phy_driver dp83640_driver = { .remove = dp83640_remove, .soft_reset = dp83640_soft_reset, .config_init = dp83640_config_init, - .ack_interrupt = dp83640_ack_interrupt, .config_intr = dp83640_config_intr, + .handle_interrupt = dp83640_handle_interrupt, }; static int __init dp83640_init(void) diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index c162c9551bd1..fff371ca1086 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -119,21 +119,6 @@ struct dp83822_private { u16 fx_sd_enable; }; -static int dp83822_ack_interrupt(struct phy_device *phydev) -{ - int err; - - err = phy_read(phydev, MII_DP83822_MISR1); - if (err < 0) - return err; - - err = phy_read(phydev, MII_DP83822_MISR2); - if (err < 0) - return err; - - return 0; -} - static int dp83822_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { @@ -303,6 +288,41 @@ static int dp83822_config_intr(struct phy_device *phydev) return phy_write(phydev, MII_DP83822_PHYSCR, physcr_status); } +static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + /* The MISR1 and MISR2 registers are holding the interrupt status in + * the upper half (15:8), while the lower half (7:0) is used for + * controlling the interrupt enable state of those individual interrupt + * sources. To determine the possible interrupt sources, just read the + * MISR* register and use it directly to know which interrupts have + * been enabled previously or not. 
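Aside from the interrupt rework, the dp83640 hunk above replaces an open-coded nibble test with the named PTP_MSGTYPE_SYNC constant: the PTP messageType lives in the low nibble of the first header byte, and SYNC is type 0, so the old (msgtype & 0xf) == 0 check and the named comparison are equivalent. A sketch of the resulting helper, assuming only what the hunk itself uses:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static bool example_is_sync(struct sk_buff *skb, int type)
{
	struct ptp_header *hdr = ptp_parse_header(skb, type);

	/* PTP_MSGTYPE_SYNC is 0x0, so this is the readable spelling
	 * of the old (msgtype & 0xf) == 0 test.
	 */
	return hdr && ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
}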
+ */ + irq_status = phy_read(phydev, MII_DP83822_MISR1); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) + goto trigger_machine; + + irq_status = phy_read(phydev, MII_DP83822_MISR2); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) + goto trigger_machine; + + return IRQ_NONE; + +trigger_machine: + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int dp8382x_disable_wol(struct phy_device *phydev) { int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | @@ -574,8 +594,8 @@ static int dp83822_resume(struct phy_device *phydev) .read_status = dp83822_read_status, \ .get_wol = dp83822_get_wol, \ .set_wol = dp83822_set_wol, \ - .ack_interrupt = dp83822_ack_interrupt, \ .config_intr = dp83822_config_intr, \ + .handle_interrupt = dp83822_handle_interrupt, \ .suspend = dp83822_suspend, \ .resume = dp83822_resume, \ } @@ -589,8 +609,8 @@ static int dp83822_resume(struct phy_device *phydev) .config_init = dp8382x_config_init, \ .get_wol = dp83822_get_wol, \ .set_wol = dp83822_set_wol, \ - .ack_interrupt = dp83822_ack_interrupt, \ .config_intr = dp83822_config_intr, \ + .handle_interrupt = dp83822_handle_interrupt, \ .suspend = dp83822_suspend, \ .resume = dp83822_resume, \ } diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 54c7c1b44e4d..937061acfc61 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -37,6 +37,20 @@ DP83848_MISR_SPD_INT_EN | \ DP83848_MISR_LINK_INT_EN) +#define DP83848_MISR_RHF_INT BIT(8) +#define DP83848_MISR_FHF_INT BIT(9) +#define DP83848_MISR_ANC_INT BIT(10) +#define DP83848_MISR_DUP_INT BIT(11) +#define DP83848_MISR_SPD_INT BIT(12) +#define DP83848_MISR_LINK_INT BIT(13) +#define DP83848_MISR_ED_INT BIT(14) + +#define DP83848_INT_MASK \ + (DP83848_MISR_ANC_INT | \ + DP83848_MISR_DUP_INT | \ + DP83848_MISR_SPD_INT | \ + DP83848_MISR_LINK_INT) + static int dp83848_ack_interrupt(struct phy_device *phydev) { int err = phy_read(phydev, DP83848_MISR); @@ -53,17 +67,46 @@ static int dp83848_config_intr(struct phy_device *phydev) return control; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + ret = dp83848_ack_interrupt(phydev); + if (ret) + return ret; + control |= DP83848_MICR_INT_OE; control |= DP83848_MICR_INTEN; ret = phy_write(phydev, DP83848_MISR, DP83848_INT_EN_MASK); if (ret < 0) return ret; + + ret = phy_write(phydev, DP83848_MICR, control); } else { control &= ~DP83848_MICR_INTEN; + ret = phy_write(phydev, DP83848_MICR, control); + if (ret) + return ret; + + ret = dp83848_ack_interrupt(phydev); + } + + return ret; +} + +static irqreturn_t dp83848_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, DP83848_MISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; } - return phy_write(phydev, DP83848_MICR, control); + if (!(irq_status & DP83848_INT_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int dp83848_config_init(struct phy_device *phydev) @@ -102,8 +145,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); .resume = genphy_resume, \ \ /* IRQ related */ \ - .ack_interrupt = dp83848_ack_interrupt, \ .config_intr = dp83848_config_intr, \ + .handle_interrupt = dp83848_handle_interrupt, \ } static struct phy_driver dp83848_driver[] = { diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 69d3eacc2b96..9bd9a5c0b1db 100644 --- a/drivers/net/phy/dp83867.c 
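The dp83822 handler above leans on a register layout trick worth spelling out: the MISR-style registers keep the latched status in bits 15:8 and the corresponding enable bits in 7:0, so shifting the enable half up by eight aligns it with the status half, and a single AND answers "is an enabled source pending?". As a stand-alone predicate:

#include <linux/bits.h>
#include <linux/types.h>

/* reg_val is a raw MISR-style read: status in 15:8, enables in 7:0. */
static bool example_enabled_irq_pending(u16 reg_val)
{
	return reg_val & ((reg_val & GENMASK(7, 0)) << 8);
}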
+++ b/drivers/net/phy/dp83867.c @@ -288,9 +288,13 @@ static void dp83867_get_wol(struct phy_device *phydev, static int dp83867_config_intr(struct phy_device *phydev) { - int micr_status; + int micr_status, err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = dp83867_ack_interrupt(phydev); + if (err) + return err; + micr_status = phy_read(phydev, MII_DP83867_MICR); if (micr_status < 0) return micr_status; @@ -303,11 +307,41 @@ static int dp83867_config_intr(struct phy_device *phydev) MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN | MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN); - return phy_write(phydev, MII_DP83867_MICR, micr_status); + err = phy_write(phydev, MII_DP83867_MICR, micr_status); + } else { + micr_status = 0x0; + err = phy_write(phydev, MII_DP83867_MICR, micr_status); + if (err) + return err; + + err = dp83867_ack_interrupt(phydev); } - micr_status = 0x0; - return phy_write(phydev, MII_DP83867_MICR, micr_status); + return err; +} + +static irqreturn_t dp83867_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, irq_enabled; + + irq_status = phy_read(phydev, MII_DP83867_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_enabled = phy_read(phydev, MII_DP83867_MICR); + if (irq_enabled < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & irq_enabled)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int dp83867_read_status(struct phy_device *phydev) @@ -825,8 +859,8 @@ static struct phy_driver dp83867_driver[] = { .set_wol = dp83867_set_wol, /* IRQ related */ - .ack_interrupt = dp83867_ack_interrupt, .config_intr = dp83867_config_intr, + .handle_interrupt = dp83867_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index cf6dec7b7d8e..b30bc142d82e 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -186,9 +186,13 @@ static int dp83869_ack_interrupt(struct phy_device *phydev) static int dp83869_config_intr(struct phy_device *phydev) { - int micr_status = 0; + int micr_status = 0, err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = dp83869_ack_interrupt(phydev); + if (err) + return err; + micr_status = phy_read(phydev, MII_DP83869_MICR); if (micr_status < 0) return micr_status; @@ -201,10 +205,40 @@ static int dp83869_config_intr(struct phy_device *phydev) MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN | MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN); - return phy_write(phydev, MII_DP83869_MICR, micr_status); + err = phy_write(phydev, MII_DP83869_MICR, micr_status); + } else { + err = phy_write(phydev, MII_DP83869_MICR, micr_status); + if (err) + return err; + + err = dp83869_ack_interrupt(phydev); } - return phy_write(phydev, MII_DP83869_MICR, micr_status); + return err; +} + +static irqreturn_t dp83869_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, irq_enabled; + + irq_status = phy_read(phydev, MII_DP83869_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_enabled = phy_read(phydev, MII_DP83869_MICR); + if (irq_enabled < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & irq_enabled)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int dp83869_set_wol(struct phy_device *phydev, @@ -850,8 +884,8 @@ static struct phy_driver dp83869_driver[] = { .soft_reset = dp83869_phy_reset, /* IRQ related */ - .ack_interrupt = dp83869_ack_interrupt, .config_intr = dp83869_config_intr, + 
.handle_interrupt = dp83869_handle_interrupt, .read_status = dp83869_read_status, .get_tunable = dp83869_get_tunable, diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index d73725312c7c..688fadffb249 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -197,6 +197,10 @@ static int dp83811_config_intr(struct phy_device *phydev) int misr_status, err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = dp83811_ack_interrupt(phydev); + if (err) + return err; + misr_status = phy_read(phydev, MII_DP83811_INT_STAT1); if (misr_status < 0) return misr_status; @@ -249,11 +253,58 @@ static int dp83811_config_intr(struct phy_device *phydev) return err; err = phy_write(phydev, MII_DP83811_INT_STAT3, 0); + if (err < 0) + return err; + + err = dp83811_ack_interrupt(phydev); } return err; } +static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + /* The INT_STAT registers 1, 2 and 3 are holding the interrupt status + * in the upper half (15:8), while the lower half (7:0) is used for + * controlling the interrupt enable state of those individual interrupt + * sources. To determine the possible interrupt sources, just read the + * INT_STAT* register and use it directly to know which interrupts have + * been enabled previously or not. + */ + irq_status = phy_read(phydev, MII_DP83811_INT_STAT1); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) + goto trigger_machine; + + irq_status = phy_read(phydev, MII_DP83811_INT_STAT2); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) + goto trigger_machine; + + irq_status = phy_read(phydev, MII_DP83811_INT_STAT3); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + if (irq_status & ((irq_status & GENMASK(7, 0)) << 8)) + goto trigger_machine; + + return IRQ_NONE; + +trigger_machine: + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int dp83811_config_aneg(struct phy_device *phydev) { int value, err; @@ -343,8 +394,8 @@ static struct phy_driver dp83811_driver[] = { .soft_reset = dp83811_phy_reset, .get_wol = dp83811_get_wol, .set_wol = dp83811_set_wol, - .ack_interrupt = dp83811_ack_interrupt, .config_intr = dp83811_config_intr, + .handle_interrupt = dp83811_handle_interrupt, .suspend = dp83811_suspend, .resume = dp83811_resume, }, diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index d6e8516cd146..b632947cbcdf 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -272,38 +272,59 @@ static int ip101a_g_config_init(struct phy_device *phydev) return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); } +static int ip101a_g_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); + + if (err < 0) + return err; + + return 0; +} + static int ip101a_g_config_intr(struct phy_device *phydev) { u16 val; + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = ip101a_g_ack_interrupt(phydev); + if (err) + return err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) /* INTR pin used: Speed/link/duplex will cause an interrupt */ val = IP101A_G_IRQ_PIN_USED; - else + err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val); + } else { val = IP101A_G_IRQ_ALL_MASK; + err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val); + if (err) + return err; + + err = ip101a_g_ack_interrupt(phydev); + } - return phy_write(phydev, 
IP101A_G_IRQ_CONF_STATUS, val); + return err; } -static int ip101a_g_did_interrupt(struct phy_device *phydev) +static irqreturn_t ip101a_g_handle_interrupt(struct phy_device *phydev) { - int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); + int irq_status; - if (val < 0) - return 0; + irq_status = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } - return val & (IP101A_G_IRQ_SPEED_CHANGE | - IP101A_G_IRQ_DUPLEX_CHANGE | - IP101A_G_IRQ_LINK_CHANGE); -} + if (!(irq_status & (IP101A_G_IRQ_SPEED_CHANGE | + IP101A_G_IRQ_DUPLEX_CHANGE | + IP101A_G_IRQ_LINK_CHANGE))) + return IRQ_NONE; -static int ip101a_g_ack_interrupt(struct phy_device *phydev) -{ - int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); - if (err < 0) - return err; + phy_trigger_machine(phydev); - return 0; + return IRQ_HANDLED; } static struct phy_driver icplus_driver[] = { @@ -332,8 +353,7 @@ static struct phy_driver icplus_driver[] = { /* PHY_BASIC_FEATURES */ .probe = ip101a_g_probe, .config_intr = ip101a_g_config_intr, - .did_interrupt = ip101a_g_did_interrupt, - .ack_interrupt = ip101a_g_ack_interrupt, + .handle_interrupt = ip101a_g_handle_interrupt, .config_init = &ip101a_g_config_init, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index b7875b36097f..6eac50d4b42f 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -209,22 +209,45 @@ static int xway_gphy_ack_interrupt(struct phy_device *phydev) return (reg < 0) ? reg : 0; } -static int xway_gphy_did_interrupt(struct phy_device *phydev) +static int xway_gphy_config_intr(struct phy_device *phydev) { - int reg; + u16 mask = 0; + int err; - reg = phy_read(phydev, XWAY_MDIO_ISTAT); - return reg & XWAY_MDIO_INIT_MASK; + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = xway_gphy_ack_interrupt(phydev); + if (err) + return err; + + mask = XWAY_MDIO_INIT_MASK; + err = phy_write(phydev, XWAY_MDIO_IMASK, mask); + } else { + err = phy_write(phydev, XWAY_MDIO_IMASK, mask); + if (err) + return err; + + err = xway_gphy_ack_interrupt(phydev); + } + + return err; } -static int xway_gphy_config_intr(struct phy_device *phydev) +static irqreturn_t xway_gphy_handle_interrupt(struct phy_device *phydev) { - u16 mask = 0; + int irq_status; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - mask = XWAY_MDIO_INIT_MASK; + irq_status = phy_read(phydev, XWAY_MDIO_ISTAT); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & XWAY_MDIO_INIT_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); - return phy_write(phydev, XWAY_MDIO_IMASK, mask); + return IRQ_HANDLED; } static struct phy_driver xway_gphy[] = { @@ -235,8 +258,7 @@ static struct phy_driver xway_gphy[] = { /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -247,8 +269,7 @@ static struct phy_driver xway_gphy[] = { /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ 
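The driver-table churn that follows each conversion is mechanical: a converted entry drops .ack_interrupt (and .did_interrupt where present) and gains .handle_interrupt next to the existing .config_intr, since the new callback both tests shared-IRQ ownership and acks the event itself. Schematically, with stub callbacks standing in for the real ones:

#include <linux/phy.h>

static int example_config_intr(struct phy_device *phydev)
{
	return 0;	/* stub: mask/unmask plus the ack ordering shown earlier */
}

static irqreturn_t example_handle_interrupt(struct phy_device *phydev)
{
	return IRQ_NONE;	/* stub */
}

static struct phy_driver example_driver = {
	.phy_id		= 0x00000000,	/* placeholder */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.config_intr	= example_config_intr,
	.handle_interrupt = example_handle_interrupt,
};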
-259,8 +280,7 @@ static struct phy_driver xway_gphy[] = { /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -271,8 +291,7 @@ static struct phy_driver xway_gphy[] = { /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -282,8 +301,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6", /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -293,8 +311,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6", /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -304,8 +321,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY11G (xRX v1.1 integrated)", /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -315,8 +331,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY22F (xRX v1.1 integrated)", /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -326,8 +341,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY11G (xRX v1.2 integrated)", /* PHY_GBIT_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, @@ -337,8 +351,7 @@ static struct phy_driver xway_gphy[] = { .name = "Intel XWAY PHY22F (xRX v1.2 integrated)", /* PHY_BASIC_FEATURES */ .config_init = xway_gphy_config_init, - .ack_interrupt = xway_gphy_ack_interrupt, - .did_interrupt = xway_gphy_did_interrupt, + .handle_interrupt = xway_gphy_handle_interrupt, .config_intr = xway_gphy_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index fec58ad69e02..0ee23d29c0d4 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -37,6 +37,8 @@ #define MII_LXT970_ISR 18 /* Interrupt Status Register */ +#define MII_LXT970_IRS_MINT BIT(15) + #define MII_LXT970_CONFIG 19 /* Configuration Register */ /* 
------------------------------------------------------------------------- */ @@ -47,6 +49,7 @@ #define MII_LXT971_IER_IEN 0x00f2 #define MII_LXT971_ISR 19 /* Interrupt Status Register */ +#define MII_LXT971_ISR_MASK 0x00f0 /* register definitions for the 973 */ #define MII_LXT973_PCR 16 /* Port Configuration Register */ @@ -75,10 +78,50 @@ static int lxt970_ack_interrupt(struct phy_device *phydev) static int lxt970_config_intr(struct phy_device *phydev) { - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN); - else - return phy_write(phydev, MII_LXT970_IER, 0); + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = lxt970_ack_interrupt(phydev); + if (err) + return err; + + err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN); + } else { + err = phy_write(phydev, MII_LXT970_IER, 0); + if (err) + return err; + + err = lxt970_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t lxt970_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + /* The interrupt status register is cleared by reading BMSR + * followed by MII_LXT970_ISR + */ + irq_status = phy_read(phydev, MII_BMSR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_status = phy_read(phydev, MII_LXT970_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_LXT970_IRS_MINT)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int lxt970_config_init(struct phy_device *phydev) @@ -99,10 +142,41 @@ static int lxt971_ack_interrupt(struct phy_device *phydev) static int lxt971_config_intr(struct phy_device *phydev) { - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN); - else - return phy_write(phydev, MII_LXT971_IER, 0); + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = lxt971_ack_interrupt(phydev); + if (err) + return err; + + err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN); + } else { + err = phy_write(phydev, MII_LXT971_IER, 0); + if (err) + return err; + + err = lxt971_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t lxt971_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_LXT971_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_LXT971_ISR_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } /* @@ -237,15 +311,15 @@ static struct phy_driver lxt97x_driver[] = { .phy_id_mask = 0xfffffff0, /* PHY_BASIC_FEATURES */ .config_init = lxt970_config_init, - .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, + .handle_interrupt = lxt970_handle_interrupt, }, { .phy_id = 0x001378e0, .name = "LXT971", .phy_id_mask = 0xfffffff0, /* PHY_BASIC_FEATURES */ - .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, + .handle_interrupt = lxt971_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5aec673a0120..620052c023a5 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -80,8 +80,11 @@ #define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 #define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 #define MII_M1111_HWCFG_MODE_RTBI 0x7 +#define MII_M1111_HWCFG_MODE_COPPER_1000X_AN 0x8 #define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9 #define 
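The lxt970 handler above encodes a quirk that is easy to miss in passing: per the comment added by the hunk, the latched status only clears after reading BMSR and then the ISR, so both reads are required even though only the second result is inspected. Isolated below, with a hypothetical register mirroring MII_LXT970_ISR:

#include <linux/bits.h>
#include <linux/mii.h>
#include <linux/phy.h>

#define MII_EXAMPLE_ISR		18	/* hypothetical */
#define MII_EXAMPLE_ISR_MINT	BIT(15)

static irqreturn_t example_handle_interrupt(struct phy_device *phydev)
{
	int irq_status;

	/* The BMSR read is part of the clear sequence; its value is
	 * deliberately discarded.
	 */
	irq_status = phy_read(phydev, MII_BMSR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	irq_status = phy_read(phydev, MII_EXAMPLE_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	if (!(irq_status & MII_EXAMPLE_ISR_MINT))
		return IRQ_NONE;

	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}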
MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb +#define MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN 0xc +#define MII_M1111_HWCFG_SERIAL_AN_BYPASS BIT(12) #define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13) #define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15) @@ -314,16 +317,43 @@ static int marvell_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = marvell_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT); - else + } else { err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); + if (err) + return err; + + err = marvell_ack_interrupt(phydev); + } return err; } +static irqreturn_t marvell_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_M1011_IEVENT); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_M1011_IMASK_INIT)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int marvell_set_polarity(struct phy_device *phydev, int polarity) { int reg; @@ -629,6 +659,51 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) return genphy_check_and_restart_aneg(phydev, changed); } +static int m88e1111_config_aneg(struct phy_device *phydev) +{ + int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR); + int err; + + if (extsr < 0) + return extsr; + + /* If not using SGMII or copper 1000BaseX modes, use normal process. + * Steps below are only required for these modes. + */ + if (phydev->interface != PHY_INTERFACE_MODE_SGMII && + (extsr & MII_M1111_HWCFG_MODE_MASK) != + MII_M1111_HWCFG_MODE_COPPER_1000X_AN) + return marvell_config_aneg(phydev); + + err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + if (err < 0) + goto error; + + /* Configure the copper link first */ + err = marvell_config_aneg(phydev); + if (err < 0) + goto error; + + /* Do not touch the fiber page if we're in copper->sgmii mode */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) + return 0; + + /* Then the fiber link */ + err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); + if (err < 0) + goto error; + + err = marvell_config_aneg_fiber(phydev); + if (err < 0) + goto error; + + return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + +error: + marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + return err; +} + static int m88e1510_config_aneg(struct phy_device *phydev) { int err; @@ -696,7 +771,7 @@ static void marvell_config_led(struct phy_device *phydev) static int marvell_config_init(struct phy_device *phydev) { - /* Set defalut LED */ + /* Set default LED */ marvell_config_led(phydev); /* Set registers from marvell,reg-init DT property */ @@ -814,6 +889,28 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev) MII_M1111_HWCFG_FIBER_COPPER_AUTO); } +static int m88e1111_config_init_1000basex(struct phy_device *phydev) +{ + int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR); + int err, mode; + + if (extsr < 0) + return extsr; + + /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */ + mode = extsr & MII_M1111_HWCFG_MODE_MASK; + if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) { + err = phy_modify(phydev, MII_M1111_PHY_EXT_SR, + MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_SERIAL_AN_BYPASS, + MII_M1111_HWCFG_MODE_COPPER_1000X_AN | + MII_M1111_HWCFG_SERIAL_AN_BYPASS); + if (err < 0) + return err; + } + return 0; +} + static int m88e1111_config_init(struct phy_device *phydev) { 
int err; @@ -836,6 +933,12 @@ static int m88e1111_config_init(struct phy_device *phydev) return err; } + if (phydev->interface == PHY_INTERFACE_MODE_1000BASEX) { + err = m88e1111_config_init_1000basex(phydev); + if (err < 0) + return err; + } + err = marvell_of_reg_init(phydev); if (err < 0) return err; @@ -1029,8 +1132,8 @@ static int m88e1510_config_init(struct phy_device *phydev) return err; /* PHY reset is necessary after changing MODE[2:0] */ - err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0, - MII_88E1510_GEN_CTRL_REG_1_RESET); + err = phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_RESET); if (err < 0) return err; @@ -1583,18 +1686,6 @@ static int marvell_aneg_done(struct phy_device *phydev) return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED); } -static int m88e1121_did_interrupt(struct phy_device *phydev) -{ - int imask; - - imask = phy_read(phydev, MII_M1011_IEVENT); - - if (imask & MII_M1011_IMASK_INIT) - return 1; - - return 0; -} - static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { @@ -1634,8 +1725,8 @@ static int m88e1318_set_wol(struct phy_device *phydev, __phy_read(phydev, MII_M1011_IEVENT); /* Enable the WOL interrupt */ - err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, - MII_88E1318S_PHY_CSIER_WOL_EIE); + err = __phy_set_bits(phydev, MII_88E1318S_PHY_CSIER, + MII_88E1318S_PHY_CSIER_WOL_EIE); if (err < 0) goto error; @@ -1673,9 +1764,9 @@ static int m88e1318_set_wol(struct phy_device *phydev, goto error; /* Clear WOL status and enable magic packet matching */ - err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0, - MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS | - MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE); + err = __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS | + MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE); if (err < 0) goto error; } else { @@ -1904,7 +1995,7 @@ static int marvell_cable_test_start_common(struct phy_device *phydev) return bmsr; if (bmcr & BMCR_ANENABLE) { - ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0); + ret = phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE); if (ret < 0) return ret; ret = genphy_soft_reset(phydev); @@ -2621,8 +2712,8 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = marvell_config_init, .config_aneg = m88e1101_config_aneg, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2639,8 +2730,8 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = m88e1111_config_init, .config_aneg = marvell_config_aneg, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2658,10 +2749,31 @@ static struct phy_driver marvell_drivers[] = { /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = m88e1111_config_init, - .config_aneg = marvell_config_aneg, + .config_aneg = m88e1111_config_aneg, + .read_status = marvell_read_status, + .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings 
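The marvell hunks above also swap several phy_modify() calls for the dedicated helpers. This is a pure readability change: phy_set_bits(phydev, reg, bits) is implemented as phy_modify(phydev, reg, 0, bits), and phy_clear_bits(phydev, reg, bits) as phy_modify(phydev, reg, bits, 0). For example, with hypothetical register and bit names:

#include <linux/bits.h>
#include <linux/phy.h>

#define MII_EXAMPLE_CTRL	0x10		/* hypothetical */
#define MII_EXAMPLE_CTRL_RESET	BIT(15)		/* hypothetical */

static int example_assert_reset(struct phy_device *phydev)
{
	/* Identical effect to
	 *   phy_modify(phydev, MII_EXAMPLE_CTRL, 0, MII_EXAMPLE_CTRL_RESET);
	 * a zero mask clears nothing, so the call is a pure bit-set,
	 * and the dedicated helper says so.
	 */
	return phy_set_bits(phydev, MII_EXAMPLE_CTRL, MII_EXAMPLE_CTRL_RESET);
}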
= marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1111_get_tunable, + .set_tunable = m88e1111_set_tunable, + }, + { + .phy_id = MARVELL_PHY_ID_88E1111_FINISAR, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1111 (Finisar)", + /* PHY_GBIT_FEATURES */ + .probe = marvell_probe, + .config_init = m88e1111_config_init, + .config_aneg = m88e1111_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2680,8 +2792,8 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = m88e1118_config_init, .config_aneg = m88e1118_config_aneg, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2699,9 +2811,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e1121_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2721,9 +2832,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = m88e1318_config_init, .config_aneg = m88e1318_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .get_wol = m88e1318_get_wol, .set_wol = m88e1318_set_wol, .resume = genphy_resume, @@ -2743,8 +2853,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = m88e1145_config_init, .config_aneg = m88e1101_config_aneg, .read_status = genphy_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2763,8 +2873,8 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = m88e1149_config_init, .config_aneg = m88e1118_config_aneg, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2781,8 +2891,8 @@ static struct phy_driver marvell_drivers[] = { .probe = marvell_probe, .config_init = m88e1111_config_init, .config_aneg = marvell_config_aneg, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2798,8 +2908,8 @@ static struct phy_driver marvell_drivers[] = { /* PHY_GBIT_FEATURES */ .probe = marvell_probe, .config_init = m88e1116r_config_init, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2820,9 +2930,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = m88e1510_config_init, .config_aneg = m88e1510_config_aneg, .read_status = 
marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .get_wol = m88e1318_get_wol, .set_wol = m88e1318_set_wol, .resume = marvell_resume, @@ -2849,9 +2958,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2875,9 +2983,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2900,9 +3007,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = m88e3016_config_init, .aneg_done = marvell_aneg_done, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2921,9 +3027,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e6390_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2946,9 +3051,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2968,9 +3072,8 @@ static struct phy_driver marvell_drivers[] = { .config_init = marvell_config_init, .config_aneg = m88e1510_config_aneg, .read_status = marvell_read_status, - .ack_interrupt = marvell_ack_interrupt, .config_intr = marvell_config_intr, - .did_interrupt = m88e1121_did_interrupt, + .handle_interrupt = marvell_handle_interrupt, .resume = genphy_resume, .suspend = genphy_suspend, .read_page = marvell_read_page, @@ -2989,6 +3092,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1145, MARVELL_PHY_ID_MASK }, diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 757e950fb745..2b42e46066b4 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -472,7 +472,7 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio, 
#endif /** - * mdiobus_create_device_from_board_info - create a full MDIO device given + * mdiobus_create_device - create a full MDIO device given * a mdio_board_info structure * @bus: MDIO bus to create the devices on * @bi: mdio_board_info structure describing the devices @@ -546,10 +546,11 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) /* de-assert bus level PHY GPIO reset */ gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { - dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", - bus->id); + err = dev_err_probe(&bus->dev, PTR_ERR(gpiod), + "mii_bus %s couldn't get reset GPIO\n", + bus->id); device_del(&bus->dev); - return PTR_ERR(gpiod); + return err; } else if (gpiod) { bus->reset_gpiod = gpiod; diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index e8f2ca625837..7e7904fee1d9 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -204,22 +204,45 @@ static int meson_gxl_config_intr(struct phy_device *phydev) int ret; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* Ack any pending IRQ */ + ret = meson_gxl_ack_interrupt(phydev); + if (ret) + return ret; + val = INTSRC_ANEG_PR | INTSRC_PARALLEL_FAULT | INTSRC_ANEG_LP_ACK | INTSRC_LINK_DOWN | INTSRC_REMOTE_FAULT | INTSRC_ANEG_COMPLETE; + ret = phy_write(phydev, INTSRC_MASK, val); } else { val = 0; + ret = phy_write(phydev, INTSRC_MASK, val); + + /* Ack any pending IRQ */ + ret = meson_gxl_ack_interrupt(phydev); } - /* Ack any pending IRQ */ - ret = meson_gxl_ack_interrupt(phydev); - if (ret) - return ret; + return ret; +} + +static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, INTSRC_FLAG); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (irq_status == 0) + return IRQ_NONE; + + phy_trigger_machine(phydev); - return phy_write(phydev, INTSRC_MASK, val); + return IRQ_HANDLED; } static struct phy_driver meson_gxl_phy[] = { @@ -231,8 +254,8 @@ static struct phy_driver meson_gxl_phy[] = { .soft_reset = genphy_soft_reset, .config_init = meson_gxl_config_init, .read_status = meson_gxl_read_status, - .ack_interrupt = meson_gxl_ack_interrupt, .config_intr = meson_gxl_config_intr, + .handle_interrupt = meson_gxl_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -241,8 +264,8 @@ static struct phy_driver meson_gxl_phy[] = { /* PHY_BASIC_FEATURES */ .flags = PHY_IS_INTERNAL, .soft_reset = genphy_soft_reset, - .ack_interrupt = meson_gxl_ack_interrupt, .config_intr = meson_gxl_config_intr, + .handle_interrupt = meson_gxl_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index a7f74b3b97af..54e0d75203da 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -48,6 +48,10 @@ #define KSZPHY_INTCS_LINK_UP BIT(8) #define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\ KSZPHY_INTCS_LINK_DOWN) +#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2) +#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0) +#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\ + KSZPHY_INTCS_LINK_UP_STATUS) /* PHY Control 1 */ #define MII_KSZPHY_CTRL_1 0x1e @@ -158,7 +162,7 @@ static int kszphy_ack_interrupt(struct phy_device *phydev) static int kszphy_config_intr(struct phy_device *phydev) { const struct kszphy_type *type = phydev->drv->driver_data; - int temp; + int temp, err; u16 mask; if (type && type->interrupt_level_mask) @@ -174,12 +178,41 @@ 
static int kszphy_config_intr(struct phy_device *phydev) phy_write(phydev, MII_KSZPHY_CTRL, temp); /* enable / disable interrupts */ - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = kszphy_ack_interrupt(phydev); + if (err) + return err; + temp = KSZPHY_INTCS_ALL; - else + err = phy_write(phydev, MII_KSZPHY_INTCS, temp); + } else { temp = 0; + err = phy_write(phydev, MII_KSZPHY_INTCS, temp); + if (err) + return err; + + err = kszphy_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t kszphy_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_KSZPHY_INTCS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & KSZPHY_INTCS_STATUS)) + return IRQ_NONE; + + phy_trigger_machine(phydev); - return phy_write(phydev, MII_KSZPHY_INTCS, temp); + return IRQ_HANDLED; } static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val) @@ -1160,8 +1193,8 @@ static struct phy_driver ksphy_driver[] = { /* PHY_BASIC_FEATURES */ .driver_data = &ks8737_type, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -1172,8 +1205,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1187,8 +1220,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1203,8 +1236,8 @@ static struct phy_driver ksphy_driver[] = { .probe = kszphy_probe, .config_init = ksz8041_config_init, .config_aneg = ksz8041_config_aneg, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1218,8 +1251,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1231,8 +1264,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8051_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1247,8 +1280,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, 
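Every config_intr conversion in this series follows the same two orderings: on the enable path, stale events are acked before the sources are unmasked; on the disable path, the sources are masked first and only then is whatever remains acked, so no event can fire while mask and status are out of sync. A minimal sketch of that shape, using hypothetical FOO_* register names and a foo_ack_interrupt() helper rather than code from any driver above:

#include <linux/phy.h>

#define FOO_ISR		0x1a	/* hypothetical: interrupt status, cleared on read */
#define FOO_IMR		0x1b	/* hypothetical: interrupt mask */
#define FOO_IMR_ALL	0xff00	/* hypothetical: all interrupt sources */

static int foo_ack_interrupt(struct phy_device *phydev)
{
	/* reading the status register clears any pending events */
	int rc = phy_read(phydev, FOO_ISR);

	return rc < 0 ? rc : 0;
}

static int foo_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* clear stale events before unmasking the sources */
		err = foo_ack_interrupt(phydev);
		if (err)
			return err;

		err = phy_write(phydev, FOO_IMR, FOO_IMR_ALL);
	} else {
		/* mask the sources, then clear anything left over */
		err = phy_write(phydev, FOO_IMR, 0);
		if (err)
			return err;

		err = foo_ack_interrupt(phydev);
	}

	return err;
}

Acking while the sources are still enabled on the disable path could race with a new event, which is why the final status read happens only after the mask write.
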
.get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1262,8 +1295,8 @@ static struct phy_driver ksphy_driver[] = { .driver_data = &ksz8081_type, .probe = kszphy_probe, .config_init = ksz8081_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1275,8 +1308,8 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, /* PHY_BASIC_FEATURES */ .config_init = ksz8061_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -1288,8 +1321,8 @@ static struct phy_driver ksphy_driver[] = { .probe = kszphy_probe, .get_features = ksz9031_get_features, .config_init = ksz9021_config_init, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1307,8 +1340,8 @@ static struct phy_driver ksphy_driver[] = { .config_init = ksz9031_config_init, .soft_reset = genphy_soft_reset, .read_status = ksz9031_read_status, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, @@ -1336,8 +1369,8 @@ static struct phy_driver ksphy_driver[] = { .probe = kszphy_probe, .config_init = ksz9131_config_init, .read_status = genphy_read_status, - .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, + .handle_interrupt = kszphy_handle_interrupt, .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index a644e8e5071c..9f1f2b6c97d4 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -44,16 +44,32 @@ static int lan88xx_phy_config_intr(struct phy_device *phydev) LAN88XX_INT_MASK_LINK_CHANGE_); } else { rc = phy_write(phydev, LAN88XX_INT_MASK, 0); + if (rc) + return rc; + + /* Ack interrupts after they have been disabled */ + rc = phy_read(phydev, LAN88XX_INT_STS); } return rc < 0 ? rc : 0; } -static int lan88xx_phy_ack_interrupt(struct phy_device *phydev) +static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev) { - int rc = phy_read(phydev, LAN88XX_INT_STS); + int irq_status; - return rc < 0 ? 
rc : 0; + irq_status = phy_read(phydev, LAN88XX_INT_STS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int lan88xx_suspend(struct phy_device *phydev) @@ -340,8 +356,8 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, - .ack_interrupt = lan88xx_phy_ack_interrupt, .config_intr = lan88xx_phy_config_intr, + .handle_interrupt = lan88xx_handle_interrupt, .suspend = lan88xx_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index fed3e395f18e..4dc00bd5a8d2 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -189,18 +189,34 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev) rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF); rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN; - } + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); + } else { + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); + if (rc) + return rc; - rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); + rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + } return rc < 0 ? rc : 0; } -static int lan87xx_phy_ack_interrupt(struct phy_device *phydev) +static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev) { - int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + int irq_status; - return rc < 0 ? rc : 0; + irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (irq_status == 0) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int lan87xx_config_init(struct phy_device *phydev) @@ -219,10 +235,9 @@ static struct phy_driver microchip_t1_phy_driver[] = { .features = PHY_BASIC_T1_FEATURES, .config_init = lan87xx_config_init, - .config_aneg = genphy_config_aneg, - .ack_interrupt = lan87xx_phy_ack_interrupt, .config_intr = lan87xx_phy_config_intr, + .handle_interrupt = lan87xx_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 6bc7406a1ce7..2f2157e3deab 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1498,7 +1498,7 @@ static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev) vsc8584_handle_macsec_interrupt(phydev); if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG) - phy_mac_interrupt(phydev); + phy_trigger_machine(phydev); return IRQ_HANDLED; } @@ -1541,16 +1541,6 @@ static int vsc85xx_config_init(struct phy_device *phydev) return 0; } -static int vsc8584_did_interrupt(struct phy_device *phydev) -{ - int rc = 0; - - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); - - return (rc < 0) ? 
0 : rc & MII_VSC85XX_INT_MASK_MASK; -} - static int vsc8514_config_pre_init(struct phy_device *phydev) { /* These are the settings to override the silicon default @@ -1933,6 +1923,10 @@ static int vsc85xx_config_intr(struct phy_device *phydev) int rc; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + rc = vsc85xx_ack_interrupt(phydev); + if (rc) + return rc; + vsc8584_config_macsec_intr(phydev); vsc8584_config_ts_intr(phydev); @@ -1943,11 +1937,33 @@ static int vsc85xx_config_intr(struct phy_device *phydev) if (rc < 0) return rc; rc = phy_read(phydev, MII_VSC85XX_INT_STATUS); + if (rc < 0) + return rc; + + rc = vsc85xx_ack_interrupt(phydev); } return rc; } +static irqreturn_t vsc85xx_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_VSC85XX_INT_MASK_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int vsc85xx_config_aneg(struct phy_device *phydev) { int rc; @@ -2114,7 +2130,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2139,9 +2155,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_aneg = &vsc85xx_config_aneg, .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8574_probe, @@ -2163,7 +2178,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc8514_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2187,7 +2202,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2211,7 +2226,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2235,7 +2250,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2259,7 +2274,7 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + 
.handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, .suspend = &genphy_suspend, .resume = &genphy_resume, @@ -2283,9 +2298,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8574_probe, @@ -2308,9 +2322,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8584_probe, @@ -2333,9 +2346,7 @@ static struct phy_driver vsc85xx_driver[] = { .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, .handle_interrupt = &vsc8584_handle_interrupt, - .ack_interrupt = &vsc85xx_ack_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8574_probe, @@ -2359,9 +2370,8 @@ static struct phy_driver vsc85xx_driver[] = { .config_aneg = &vsc85xx_config_aneg, .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, - .ack_interrupt = &vsc85xx_ack_interrupt, + .handle_interrupt = vsc85xx_handle_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8574_probe, @@ -2386,9 +2396,7 @@ static struct phy_driver vsc85xx_driver[] = { .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, .handle_interrupt = &vsc8584_handle_interrupt, - .ack_interrupt = &vsc85xx_ack_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8584_probe, @@ -2411,9 +2419,7 @@ static struct phy_driver vsc85xx_driver[] = { .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, .handle_interrupt = &vsc8584_handle_interrupt, - .ack_interrupt = &vsc85xx_ack_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8584_probe, @@ -2436,9 +2442,7 @@ static struct phy_driver vsc85xx_driver[] = { .aneg_done = &genphy_aneg_done, .read_status = &vsc85xx_read_status, .handle_interrupt = &vsc8584_handle_interrupt, - .ack_interrupt = &vsc85xx_ack_interrupt, .config_intr = &vsc85xx_config_intr, - .did_interrupt = &vsc8584_did_interrupt, .suspend = &genphy_suspend, .resume = &genphy_resume, .probe = &vsc8584_probe, diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index b97ee79f3cdf..924ed5b034a4 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -136,7 +136,7 @@ static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk, phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588); - if (!cond || (cond && upper)) + if (!cond || upper) phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper); phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower); @@ -506,9 +506,9 @@ static int 
vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk) { struct vsc8531_private *vsc8531 = phydev->priv; bool base = phydev->mdio.addr == vsc8531->ts_base_addr; - enum vsc85xx_ptp_msg_type msgs[] = { - PTP_MSG_TYPE_SYNC, - PTP_MSG_TYPE_DELAY_REQ + u8 msgs[] = { + PTP_MSGTYPE_SYNC, + PTP_MSGTYPE_DELAY_REQ }; u32 val; u8 i; @@ -847,9 +847,9 @@ static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk, bool one_step, bool enable) { - enum vsc85xx_ptp_msg_type msgs[] = { - PTP_MSG_TYPE_SYNC, - PTP_MSG_TYPE_DELAY_REQ + u8 msgs[] = { + PTP_MSGTYPE_SYNC, + PTP_MSGTYPE_DELAY_REQ }; u32 val; u8 i; @@ -858,7 +858,7 @@ static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk, if (blk == INGRESS) vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i], PTP_WRITE_NS); - else if (msgs[i] == PTP_MSG_TYPE_SYNC && one_step) + else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step) /* no need to know Sync t when sending in one_step */ vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i], PTP_WRITE_1588); @@ -1510,6 +1510,8 @@ void vsc8584_config_ts_intr(struct phy_device *phydev) int vsc8584_ptp_init(struct phy_device *phydev) { switch (phydev->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_VSC8572: + case PHY_ID_VSC8574: case PHY_ID_VSC8575: case PHY_ID_VSC8582: case PHY_ID_VSC8584: diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h index 3ea163af0f4f..da3465360e90 100644 --- a/drivers/net/phy/mscc/mscc_ptp.h +++ b/drivers/net/phy/mscc/mscc_ptp.h @@ -436,11 +436,6 @@ enum ptp_cmd { PTP_SAVE_IN_TS_FIFO = 11, /* invalid when writing in reg */ }; -enum vsc85xx_ptp_msg_type { - PTP_MSG_TYPE_SYNC, - PTP_MSG_TYPE_DELAY_REQ, -}; - struct vsc85xx_ptphdr { u8 tsmt; /* transportSpecific | messageType */ u8 ver; /* reserved0 | versionPTP */ diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index a5bf0874c7d8..5a8c8eb18582 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -63,19 +63,6 @@ static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data) phy_write(phydev, NS_EXP_MEM_DATA, data); } -static int ns_config_intr(struct phy_device *phydev) -{ - int err; - - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_write(phydev, DP83865_INT_MASK, - DP83865_INT_MASK_DEFAULT); - else - err = phy_write(phydev, DP83865_INT_MASK, 0); - - return err; -} - static int ns_ack_interrupt(struct phy_device *phydev) { int ret = phy_read(phydev, DP83865_INT_STATUS); @@ -89,6 +76,49 @@ static int ns_ack_interrupt(struct phy_device *phydev) return ret; } +static irqreturn_t ns_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, DP83865_INT_STATUS); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & DP83865_INT_MASK_DEFAULT)) + return IRQ_NONE; + + /* clear the interrupt */ + phy_write(phydev, DP83865_INT_CLEAR, irq_status & ~0x7); + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static int ns_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = ns_ack_interrupt(phydev); + if (err) + return err; + + err = phy_write(phydev, DP83865_INT_MASK, + DP83865_INT_MASK_DEFAULT); + } else { + err = phy_write(phydev, DP83865_INT_MASK, 0); + if (err) + return err; + + err = ns_ack_interrupt(phydev); + } + + return err; +} + static void ns_giga_speed_fallback(struct phy_device *phydev, int mode) { 
int bmcr = phy_read(phydev, MII_BMCR); @@ -133,8 +163,8 @@ static struct phy_driver dp83865_driver[] = { { .name = "NatSemi DP83865", /* PHY_GBIT_FEATURES */ .config_init = ns_config_init, - .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, + .handle_interrupt = ns_handle_interrupt, } }; module_phy_driver(dp83865_driver); diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index a72fa0d2e7c7..afd7afa1f498 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -44,6 +44,9 @@ #define MII_CFG2_SLEEP_REQUEST_TO_16MS 0x3 #define MII_INTSRC 21 +#define MII_INTSRC_LINK_FAIL BIT(10) +#define MII_INTSRC_LINK_UP BIT(9) +#define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP) #define MII_INTSRC_TEMP_ERR BIT(1) #define MII_INTSRC_UV_ERR BIT(3) @@ -597,11 +600,42 @@ static int tja11xx_ack_interrupt(struct phy_device *phydev) static int tja11xx_config_intr(struct phy_device *phydev) { int value = 0; + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = tja11xx_ack_interrupt(phydev); + if (err) + return err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP; + err = phy_write(phydev, MII_INTEN, value); + } else { + err = phy_write(phydev, MII_INTEN, value); + if (err) + return err; + + err = tja11xx_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_INTSRC); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_INTSRC_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); - return phy_write(phydev, MII_INTEN, value); + return IRQ_HANDLED; } static int tja11xx_cable_test_start(struct phy_device *phydev) @@ -747,8 +781,8 @@ static struct phy_driver tja11xx_driver[] = { .get_sset_count = tja11xx_get_sset_count, .get_strings = tja11xx_get_strings, .get_stats = tja11xx_get_stats, - .ack_interrupt = tja11xx_ack_interrupt, .config_intr = tja11xx_config_intr, + .handle_interrupt = tja11xx_handle_interrupt, .cable_test_start = tja11xx_cable_test_start, .cable_test_get_status = tja11xx_cable_test_get_status, }, { @@ -770,8 +804,8 @@ static struct phy_driver tja11xx_driver[] = { .get_sset_count = tja11xx_get_sset_count, .get_strings = tja11xx_get_strings, .get_stats = tja11xx_get_stats, - .ack_interrupt = tja11xx_ack_interrupt, .config_intr = tja11xx_config_intr, + .handle_interrupt = tja11xx_handle_interrupt, .cable_test_start = tja11xx_cable_test_start, .cable_test_get_status = tja11xx_cable_test_get_status, } diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index bd11e62bfdfe..077f2929c45e 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -9,7 +9,7 @@ #include <linux/phy.h> /** - * genphy_c45_setup_forced - configures a forced speed + * genphy_c45_pma_setup_forced - configures a forced speed * @phydev: target phy_device struct */ int genphy_c45_pma_setup_forced(struct phy_device *phydev) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 35525a671400..45f75533c47c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -114,23 +114,6 @@ void phy_print_status(struct phy_device *phydev) EXPORT_SYMBOL(phy_print_status); /** - * phy_clear_interrupt - Ack the phy device's interrupt - * @phydev: the phy_device struct - * - * If the @phydev driver has an ack_interrupt function, call it to - * ack and clear the phy device's interrupt. 
- * - * Returns 0 on success or < 0 on error. - */ -static int phy_clear_interrupt(struct phy_device *phydev) -{ - if (phydev->drv->ack_interrupt) - return phydev->drv->ack_interrupt(phydev); - - return 0; -} - -/** * phy_config_interrupt - configure the PHY device for the requested interrupts * @phydev: the phy_device struct * @interrupts: interrupt flags to configure for this @phydev @@ -489,14 +472,15 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies) EXPORT_SYMBOL(phy_queue_state_machine); /** - * phy_queue_state_machine - Trigger the state machine to run now + * phy_trigger_machine - Trigger the state machine to run now * * @phydev: the phy_device struct */ -static void phy_trigger_machine(struct phy_device *phydev) +void phy_trigger_machine(struct phy_device *phydev) { phy_queue_state_machine(phydev, 0); } +EXPORT_SYMBOL(phy_trigger_machine); static void phy_abort_cable_test(struct phy_device *phydev) { @@ -924,7 +908,7 @@ void phy_stop_machine(struct phy_device *phydev) * Must not be called from interrupt context, or while the * phydev->lock is held. */ -static void phy_error(struct phy_device *phydev) +void phy_error(struct phy_device *phydev) { WARN_ON(1); @@ -934,6 +918,7 @@ static void phy_error(struct phy_device *phydev) phy_trigger_machine(phydev); } +EXPORT_SYMBOL(phy_error); /** * phy_disable_interrupts - Disable the PHY interrupts from the PHY side @@ -941,15 +926,8 @@ */ int phy_disable_interrupts(struct phy_device *phydev) { - int err; - /* Disable PHY interrupts */ - err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); - if (err) - return err; - - /* Clear the interrupt */ - return phy_clear_interrupt(phydev); + return phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED); } /** @@ -964,22 +942,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) struct phy_device *phydev = phy_dat; struct phy_driver *drv = phydev->drv; - if (drv->handle_interrupt) - return drv->handle_interrupt(phydev); - - if (drv->did_interrupt && !drv->did_interrupt(phydev)) - return IRQ_NONE; - - /* reschedule state queue work to run as soon as possible */ - phy_trigger_machine(phydev); - - /* did_interrupt() may have cleared the interrupt already */ - if (!drv->did_interrupt && phy_clear_interrupt(phydev)) { - phy_error(phydev); - return IRQ_NONE; - } - - return IRQ_HANDLED; + return drv->handle_interrupt(phydev); } /** @@ -988,11 +951,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) */ static int phy_enable_interrupts(struct phy_device *phydev) { - int err = phy_clear_interrupt(phydev); - - if (err < 0) - return err; - return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 5dab6be6fc38..80c2e646c093 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1156,7 +1156,7 @@ void phy_attached_info(struct phy_device *phydev) } EXPORT_SYMBOL(phy_attached_info); -#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)" +#define ATTACHED_FMT "attached PHY driver %s(mii_bus:phy_addr=%s, irq=%s)" char *phy_attached_info_irq(struct phy_device *phydev) { char *irq_str; @@ -1181,19 +1181,17 @@ EXPORT_SYMBOL(phy_attached_info_irq); void phy_attached_print(struct phy_device *phydev, const char *fmt, ...) { - const char *drv_name = phydev->drv ? phydev->drv->name : "unbound"; + const char *unbound = phydev->drv ?
"" : "[unbound] "; char *irq_str = phy_attached_info_irq(phydev); if (!fmt) { phydev_info(phydev, ATTACHED_FMT "\n", unbound, phydev_name(phydev), irq_str); } else { va_list ap; phydev_info(phydev, ATTACHED_FMT, unbound, phydev_name(phydev), irq_str); va_start(ap, fmt); vprintk(fmt, ap); @@ -2463,6 +2461,19 @@ int genphy_soft_reset(struct phy_device *phydev) } EXPORT_SYMBOL(genphy_soft_reset); +irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev) +{ + /* It seems there are cases where the interrupts are handled by another + * entity (i.e. an IRQ controller embedded inside the PHY) and do not + * need any other interaction from phylib. In this case, just trigger + * the state machine directly. + */ + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(genphy_handle_interrupt_no_ack); + /** * genphy_read_abilities - read PHY abilities from Clause 22 registers * @phydev: target phy_device struct @@ -2735,7 +2746,7 @@ static int phy_get_int_delay_property(struct device *dev, const char *name) #endif /** - * phy_get_delay_index - returns the index of the internal delay + * phy_get_internal_delay - returns the index of the internal delay * @phydev: phy_device struct * @dev: pointer to the devices device struct * @delay_values: array of delays the PHY supports @@ -2815,7 +2826,7 @@ EXPORT_SYMBOL(phy_get_internal_delay); static bool phy_drv_supports_irq(struct phy_driver *phydrv) { - return phydrv->config_intr && phydrv->ack_interrupt; + return phydrv->config_intr && phydrv->handle_interrupt; } /** @@ -2947,6 +2958,13 @@ static int phy_remove(struct device *dev) return 0; } +static void phy_shutdown(struct device *dev) +{ + struct phy_device *phydev = to_phy_device(dev); + + phy_disable_interrupts(phydev); +} + /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register @@ -2970,6 +2988,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; + new_driver->mdiodrv.driver.shutdown = phy_shutdown; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index 59a94e07e7c5..f550576eb9da 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -66,11 +66,11 @@ static void phy_led_trigger_format_name(struct phy_device *phy, char *buf, static int phy_led_trigger_register(struct phy_device *phy, struct phy_led_trigger *plt, - unsigned int speed) + unsigned int speed, + const char *suffix) { plt->speed = speed; - phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), - phy_speed_to_str(speed)); + phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), suffix); plt->trigger.name = plt->name; return led_trigger_register(&plt->trigger); @@ -99,12 +99,7 @@ int phy_led_triggers_register(struct phy_device *phy) goto out_clear; } - phy_led_trigger_format_name(phy, phy->led_link_trigger->name, - sizeof(phy->led_link_trigger->name), - "link"); - phy->led_link_trigger->trigger.name = phy->led_link_trigger->name; - - err = led_trigger_register(&phy->led_link_trigger->trigger); + err = phy_led_trigger_register(phy,
phy->led_link_trigger, 0, "link"); if (err) goto out_free_link; @@ -119,7 +114,8 @@ int phy_led_triggers_register(struct phy_device *phy) for (i = 0; i < phy->phy_num_led_triggers; i++) { err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i], - speeds[i]); + speeds[i], + phy_speed_to_str(speeds[i])); if (err) goto out_unreg; } diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index fe2296fdda19..84f6e197f965 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1649,7 +1649,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); /** - * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error + * phylink_get_eee_err() - read the energy efficient ethernet error * counter * @pl: a pointer to a &struct phylink returned from phylink_create(). * @@ -2515,9 +2515,10 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, changed = ret > 0; + /* Ensure ISOLATE bit is disabled */ bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0; ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR, - BMCR_ANENABLE, bmcr); + BMCR_ANENABLE | BMCR_ISOLATE, bmcr); if (ret < 0) return ret; diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index 1b15a991ee06..d5c1aaa8236a 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -75,6 +75,10 @@ static int qs6612_ack_interrupt(struct phy_device *phydev) { int err; + /* The Interrupt Source register is not self-clearing, bits 4 and 5 are + * cleared when MII_BMSR is read and bits 1 and 3 are cleared when + * MII_EXPANSION is read + */ err = phy_read(phydev, MII_QS6612_ISR); if (err < 0) @@ -96,24 +100,56 @@ static int qs6612_ack_interrupt(struct phy_device *phydev) static int qs6612_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* clear any interrupts before enabling them */ + err = qs6612_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, MII_QS6612_IMR, MII_QS6612_IMR_INIT); - else + } else { err = phy_write(phydev, MII_QS6612_IMR, 0); + if (err) + return err; + + /* clear any leftover interrupts */ + err = qs6612_ack_interrupt(phydev); + } return err; } +static irqreturn_t qs6612_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, MII_QS6612_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_QS6612_IMR_INIT)) + return IRQ_NONE; + + /* the interrupt source register is not self-clearing */ + qs6612_ack_interrupt(phydev); + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static struct phy_driver qs6612_driver[] = { { .phy_id = 0x00181440, .name = "QS6612", .phy_id_mask = 0xfffffff0, /* PHY_BASIC_FEATURES */ .config_init = qs6612_config_init, - .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, + .handle_interrupt = qs6612_handle_interrupt, } }; module_phy_driver(qs6612_driver); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 575580d3ffe0..99ecd6c4c15a 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -41,6 +41,12 @@ #define RTL8211E_RX_DELAY BIT(11) #define RTL8201F_ISR 0x1e +#define RTL8201F_ISR_ANERR BIT(15) +#define RTL8201F_ISR_DUPLEX BIT(13) +#define RTL8201F_ISR_LINK BIT(11) +#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \ + RTL8201F_ISR_DUPLEX | \ + RTL8201F_ISR_LINK) #define RTL8201F_IER 0x13 
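All of the handle_interrupt implementations added in this series reduce to one template: read the interrupt status register (usually read-to-clear), report a failed read through phy_error() and return IRQ_NONE, return IRQ_NONE as well when none of the enabled sources fired, and otherwise kick the phylib state machine and return IRQ_HANDLED. A minimal sketch, again with a hypothetical FOO_ISR register and source mask rather than any device above:

#include <linux/phy.h>

#define FOO_ISR		0x1a		/* hypothetical: status register, cleared on read */
#define FOO_ISR_SOURCES	GENMASK(13, 11)	/* hypothetical: the sources we enabled */

static irqreturn_t foo_handle_interrupt(struct phy_device *phydev)
{
	int irq_status;

	/* the read both reports and acks the pending events */
	irq_status = phy_read(phydev, FOO_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	if (!(irq_status & FOO_ISR_SOURCES))
		return IRQ_NONE;

	/* reschedule the state machine to pick up the new link state */
	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}

Returning IRQ_NONE when no enabled source is pending is what lets these handlers share an interrupt line, and it is also why phy_interrupt() earlier in this series could shrink to a single drv->handle_interrupt() call.
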
#define RTL8366RB_POWER_SAVE 0x15 @@ -102,24 +108,45 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev) static int rtl8201_config_intr(struct phy_device *phydev) { u16 val; + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = rtl8201_ack_interrupt(phydev); + if (err) + return err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) val = BIT(13) | BIT(12) | BIT(11); - else + err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val); + } else { val = 0; + err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val); + if (err) + return err; - return phy_write_paged(phydev, 0x7, RTL8201F_IER, val); + err = rtl8201_ack_interrupt(phydev); + } + + return err; } static int rtl8211b_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = rtl821x_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, RTL821x_INER, RTL8211B_INER_INIT); - else + } else { err = phy_write(phydev, RTL821x_INER, 0); + if (err) + return err; + + err = rtl821x_ack_interrupt(phydev); + } return err; } @@ -128,11 +155,20 @@ static int rtl8211e_config_intr(struct phy_device *phydev) { int err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = rtl821x_ack_interrupt(phydev); + if (err) + return err; + err = phy_write(phydev, RTL821x_INER, RTL8211E_INER_LINK_STATUS); - else + } else { err = phy_write(phydev, RTL821x_INER, 0); + if (err) + return err; + + err = rtl821x_ack_interrupt(phydev); + } return err; } @@ -140,13 +176,85 @@ static int rtl8211e_config_intr(struct phy_device *phydev) static int rtl8211f_config_intr(struct phy_device *phydev) { u16 val; + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = rtl8211f_ack_interrupt(phydev); + if (err) + return err; - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) val = RTL8211F_INER_LINK_STATUS; - else + err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val); + } else { val = 0; + err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val); + if (err) + return err; - return phy_write_paged(phydev, 0xa42, RTL821x_INER, val); + err = rtl8211f_ack_interrupt(phydev); + } + + return err; +} + +static irqreturn_t rtl8201_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read(phydev, RTL8201F_ISR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & RTL8201F_ISR_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static irqreturn_t rtl821x_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, irq_enabled; + + irq_status = phy_read(phydev, RTL821x_INSR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_enabled = phy_read(phydev, RTL821x_INER); + if (irq_enabled < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & irq_enabled)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + +static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev) +{ + int irq_status; + + irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & RTL8211F_INER_LINK_STATUS)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int rtl8211_config_aneg(struct phy_device *phydev) @@ -556,8 +664,8 @@ static struct phy_driver realtek_drvs[] = { }, { 
PHY_ID_MATCH_EXACT(0x001cc816), .name = "RTL8201F Fast Ethernet", - .ack_interrupt = &rtl8201_ack_interrupt, .config_intr = &rtl8201_config_intr, + .handle_interrupt = rtl8201_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, @@ -582,8 +690,8 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc912), .name = "RTL8211B Gigabit Ethernet", - .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, + .handle_interrupt = rtl821x_handle_interrupt, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, .suspend = rtl8211b_suspend, @@ -601,8 +709,8 @@ static struct phy_driver realtek_drvs[] = { }, { PHY_ID_MATCH_EXACT(0x001cc914), .name = "RTL8211DN Gigabit Ethernet", - .ack_interrupt = rtl821x_ack_interrupt, .config_intr = rtl8211e_config_intr, + .handle_interrupt = rtl821x_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, @@ -611,8 +719,8 @@ static struct phy_driver realtek_drvs[] = { PHY_ID_MATCH_EXACT(0x001cc915), .name = "RTL8211E Gigabit Ethernet", .config_init = &rtl8211e_config_init, - .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211e_config_intr, + .handle_interrupt = rtl821x_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, @@ -621,8 +729,9 @@ static struct phy_driver realtek_drvs[] = { PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", .config_init = &rtl8211f_config_init, - .ack_interrupt = &rtl8211f_ack_interrupt, + .read_status = rtlgen_read_status, .config_intr = &rtl8211f_config_intr, + .handle_interrupt = rtl8211f_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, .read_page = rtl821x_read_page, @@ -662,6 +771,46 @@ static struct phy_driver realtek_drvs[] = { .read_mmd = rtl822x_read_mmd, .write_mmd = rtl822x_write_mmd, }, { + PHY_ID_MATCH_EXACT(0x001cc838), + .name = "RTL8226-CG 2.5Gbps PHY", + .get_features = rtl822x_get_features, + .config_aneg = rtl822x_config_aneg, + .read_status = rtl822x_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001cc848), + .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY", + .get_features = rtl822x_get_features, + .config_aneg = rtl822x_config_aneg, + .read_status = rtl822x_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001cc849), + .name = "RTL8221B-VB-CG 2.5Gbps PHY", + .get_features = rtl822x_get_features, + .config_aneg = rtl822x_config_aneg, + .read_status = rtl822x_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001cc84a), + .name = "RTL8221B-VM-CG 2.5Gbps PHY", + .get_features = rtl822x_get_features, + .config_aneg = rtl822x_config_aneg, + .read_status = rtl822x_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", .config_init = &rtl8366rb_config_init, @@ -670,8 +819,8 @@ static struct phy_driver realtek_drvs[] = { * irq is requested and ACKed by reading the status register, * which is done by the irqchip code. 
*/ - .ack_interrupt = genphy_no_ack_interrupt, .config_intr = genphy_no_config_intr, + .handle_interrupt = genphy_handle_interrupt_no_ack, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 58014feedf6c..20b91f5dfc6e 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -334,14 +334,13 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, } /* If we haven't discovered any modes that this module supports, try - * the encoding and bitrate to determine supported modes. Some BiDi - * modules (eg, 1310nm/1550nm) are not 1000BASE-BX compliant due to - * the differing wavelengths, so do not set any transceiver bits. + * the bitrate to determine supported modes. Some BiDi modules (eg, + * 1310nm/1550nm) are not 1000BASE-BX compliant due to the differing + * wavelengths, so do not set any transceiver bits. */ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) { - /* If the encoding and bit rate allows 1000baseX */ - if (id->base.encoding == SFF8024_ENCODING_8B10B && br_nom && - br_min <= 1300 && br_max >= 1200) + /* If the bit rate allows 1000baseX */ + if (br_nom && br_min <= 1300 && br_max >= 1200) phylink_set(modes, 1000baseX_Full); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 34aa196b7465..91d74c1a920a 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -219,6 +219,7 @@ struct sfp { struct sfp_bus *sfp_bus; struct phy_device *mod_phy; const struct sff_data *type; + size_t i2c_block_size; u32 max_power_mW; unsigned int (*get_state)(struct sfp *); @@ -335,10 +336,19 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, size_t len) { struct i2c_msg msgs[2]; - u8 bus_addr = a2 ? 0x51 : 0x50; + size_t block_size; size_t this_len; + u8 bus_addr; int ret; + if (a2) { + block_size = 16; + bus_addr = 0x51; + } else { + block_size = sfp->i2c_block_size; + bus_addr = 0x50; + } + msgs[0].addr = bus_addr; msgs[0].flags = 0; msgs[0].len = 1; @@ -350,8 +360,8 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, while (len) { this_len = len; - if (this_len > 16) - this_len = 16; + if (this_len > block_size) + this_len = block_size; msgs[1].len = this_len; @@ -1632,6 +1642,28 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) return 0; } +/* Some modules (Nokia 3FE46541AA) lock up if byte 0x51 is read as a + * single read. Switch back to reading 16 byte blocks unless we have + * a CarlitoxxPro module (rebranded VSOL V2801F). Even more annoyingly, + * some VSOL V2801F have the vendor name changed to OEM. + */ +static int sfp_quirk_i2c_block_size(const struct sfp_eeprom_base *base) +{ + if (!memcmp(base->vendor_name, "VSOL ", 16)) + return 1; + if (!memcmp(base->vendor_name, "OEM ", 16) && + !memcmp(base->vendor_pn, "V2801F ", 16)) + return 1; + + /* Some modules can't cope with long reads */ + return 16; +} + +static void sfp_quirks_base(struct sfp *sfp, const struct sfp_eeprom_base *base) +{ + sfp->i2c_block_size = sfp_quirk_i2c_block_size(base); +} + static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id) { u8 check; @@ -1673,14 +1705,20 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) u8 check; int ret; - ret = sfp_read(sfp, false, 0, &id, sizeof(id)); + /* Some modules (CarlitoxxPro CPGOS03-0490) do not support multibyte + * reads from the EEPROM, so start by reading the base identifying + * information one byte at a time. 
+ */ + sfp->i2c_block_size = 1; + + ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base)); if (ret < 0) { if (report) dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); return -EAGAIN; } - if (ret != sizeof(id)) { + if (ret != sizeof(id.base)) { dev_err(sfp->dev, "EEPROM short read: %d\n", ret); return -EAGAIN; } @@ -1719,6 +1757,21 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) } } + /* Apply any early module-specific quirks */ + sfp_quirks_base(sfp, &id.base); + + ret = sfp_read(sfp, false, SFP_CC_BASE + 1, &id.ext, sizeof(id.ext)); + if (ret < 0) { + if (report) + dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret); + return -EAGAIN; + } + + if (ret != sizeof(id.ext)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", ret); + return -EAGAIN; + } + check = sfp_check(&id.ext, sizeof(id.ext) - 1); if (check != id.ext.cc_ext) { if (cotsworks) { diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 0fc39ac5ca88..33372756a451 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -48,6 +48,13 @@ struct smsc_phy_priv { struct clk *refclk; }; +static int smsc_phy_ack_interrupt(struct phy_device *phydev) +{ + int rc = phy_read(phydev, MII_LAN83C185_ISF); + + return rc < 0 ? rc : 0; +} + static int smsc_phy_config_intr(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; @@ -55,21 +62,47 @@ static int smsc_phy_config_intr(struct phy_device *phydev) int rc; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + rc = smsc_phy_ack_interrupt(phydev); + if (rc) + return rc; + intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6; if (priv->energy_enable) intmask |= MII_LAN83C185_ISF_INT7; - } + rc = phy_write(phydev, MII_LAN83C185_IM, intmask); + } else { + rc = phy_write(phydev, MII_LAN83C185_IM, intmask); + if (rc) + return rc; - rc = phy_write(phydev, MII_LAN83C185_IM, intmask); + rc = smsc_phy_ack_interrupt(phydev); + } return rc < 0 ? rc : 0; } -static int smsc_phy_ack_interrupt(struct phy_device *phydev) +static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev) { - int rc = phy_read (phydev, MII_LAN83C185_ISF); + int irq_status, irq_enabled; - return rc < 0 ? 
rc : 0; + irq_enabled = phy_read(phydev, MII_LAN83C185_IM); + if (irq_enabled < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + irq_status = phy_read(phydev, MII_LAN83C185_ISF); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & irq_enabled)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static int smsc_phy_config_init(struct phy_device *phydev) @@ -314,8 +347,8 @@ static struct phy_driver smsc_phy_driver[] = { .soft_reset = smsc_phy_reset, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, @@ -333,8 +366,8 @@ static struct phy_driver smsc_phy_driver[] = { .soft_reset = smsc_phy_reset, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, /* Statistics */ .get_sset_count = smsc_get_sset_count, @@ -362,8 +395,8 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = lan87xx_config_aneg, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, /* Statistics */ .get_sset_count = smsc_get_sset_count, @@ -385,8 +418,8 @@ static struct phy_driver smsc_phy_driver[] = { .config_init = lan911x_config_init, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, @@ -410,8 +443,8 @@ static struct phy_driver smsc_phy_driver[] = { .config_aneg = lan87xx_config_aneg_ext, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, /* Statistics */ .get_sset_count = smsc_get_sset_count, @@ -436,8 +469,8 @@ static struct phy_driver smsc_phy_driver[] = { .soft_reset = smsc_phy_reset, /* IRQ related */ - .ack_interrupt = smsc_phy_ack_interrupt, .config_intr = smsc_phy_config_intr, + .handle_interrupt = smsc_phy_handle_interrupt, /* Statistics */ .get_sset_count = smsc_get_sset_count, diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index d735a01380ed..431fe5e0ce31 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -48,32 +48,55 @@ static int ste10Xp_config_init(struct phy_device *phydev) return 0; } +static int ste10Xp_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, MII_XCIIS); + + if (err < 0) + return err; + + return 0; +} + static int ste10Xp_config_intr(struct phy_device *phydev) { - int err, value; + int err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* clear any pending interrupts */ + err = ste10Xp_ack_interrupt(phydev); + if (err) + return err; + /* Enable all STe101P interrupts (PR12) */ err = phy_write(phydev, MII_XIE, MII_XIE_DEFAULT_MASK); - /* clear any pending interrupts */ - if (err == 0) { - value = phy_read(phydev, MII_XCIIS); - if (value < 0) - err = value; - } - } else + } else { err = phy_write(phydev, MII_XIE, 0); + if (err) + return err; + + err = ste10Xp_ack_interrupt(phydev); + } return err; } -static int ste10Xp_ack_interrupt(struct phy_device *phydev) +static irqreturn_t ste10Xp_handle_interrupt(struct phy_device *phydev) { - int err = phy_read(phydev, MII_XCIIS); - if (err < 0) - return err; + int irq_status; - return 0; + irq_status = phy_read(phydev, MII_XCIIS); + if 
(irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & MII_XIE_DEFAULT_MASK)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; } static struct phy_driver ste10xp_pdriver[] = { @@ -83,8 +106,8 @@ static struct phy_driver ste10xp_pdriver[] = { .name = "STe101p", /* PHY_BASIC_FEATURES */ .config_init = ste10Xp_config_init, - .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, + .handle_interrupt = ste10Xp_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, }, { @@ -93,8 +116,8 @@ static struct phy_driver ste10xp_pdriver[] = { .name = "STe100p", /* PHY_BASIC_FEATURES */ .config_init = ste10Xp_config_init, - .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, + .handle_interrupt = ste10Xp_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, } }; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index bb680352708a..16704e243162 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -40,6 +40,11 @@ #define MII_VSC8244_ISTAT_SPEED 0x4000 #define MII_VSC8244_ISTAT_LINK 0x2000 #define MII_VSC8244_ISTAT_DUPLEX 0x1000 +#define MII_VSC8244_ISTAT_MASK (MII_VSC8244_ISTAT_SPEED | \ + MII_VSC8244_ISTAT_LINK | \ + MII_VSC8244_ISTAT_DUPLEX) + +#define MII_VSC8221_ISTAT_MASK MII_VSC8244_ISTAT_LINK /* Vitesse Auxiliary Control/Status Register */ #define MII_VSC8244_AUX_CONSTAT 0x1c @@ -270,25 +275,14 @@ static int vsc8601_config_init(struct phy_device *phydev) return 0; } -static int vsc824x_ack_interrupt(struct phy_device *phydev) -{ - int err = 0; - - /* Don't bother to ACK the interrupts if interrupts - * are disabled. The 824x cannot clear the interrupts - * if they are disabled. - */ - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) - err = phy_read(phydev, MII_VSC8244_ISTAT); - - return (err < 0) ? err : 0; -} - static int vsc82xx_config_intr(struct phy_device *phydev) { int err; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + /* Don't bother to ACK the interrupts since the 824x cannot + * clear the interrupts if they are disabled. 
+ */ err = phy_write(phydev, MII_VSC8244_IMASK, (phydev->drv->phy_id == PHY_ID_VSC8234 || phydev->drv->phy_id == PHY_ID_VSC8244 || @@ -311,6 +305,31 @@ static int vsc82xx_config_intr(struct phy_device *phydev) return err; } +static irqreturn_t vsc82xx_handle_interrupt(struct phy_device *phydev) +{ + int irq_status, irq_mask; + + if (phydev->drv->phy_id == PHY_ID_VSC8244 || + phydev->drv->phy_id == PHY_ID_VSC8572 || + phydev->drv->phy_id == PHY_ID_VSC8601) + irq_mask = MII_VSC8244_ISTAT_MASK; + else + irq_mask = MII_VSC8221_ISTAT_MASK; + + irq_status = phy_read(phydev, MII_VSC8244_ISTAT); + if (irq_status < 0) { + phy_error(phydev); + return IRQ_NONE; + } + + if (!(irq_status & irq_mask)) + return IRQ_NONE; + + phy_trigger_machine(phydev); + + return IRQ_HANDLED; +} + static int vsc8221_config_init(struct phy_device *phydev) { int err; @@ -390,8 +409,8 @@ static struct phy_driver vsc82xx_driver[] = { /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { .phy_id = PHY_ID_VSC8244, .name = "Vitesse VSC8244", @@ -399,8 +418,8 @@ static struct phy_driver vsc82xx_driver[] = { /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { .phy_id = PHY_ID_VSC8572, .name = "Vitesse VSC8572", @@ -408,16 +427,16 @@ static struct phy_driver vsc82xx_driver[] = { /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { .phy_id = PHY_ID_VSC8601, .name = "Vitesse VSC8601", .phy_id_mask = 0x000ffff0, /* PHY_GBIT_FEATURES */ .config_init = &vsc8601_config_init, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { .phy_id = PHY_ID_VSC7385, .name = "Vitesse VSC7385", @@ -461,8 +480,8 @@ static struct phy_driver vsc82xx_driver[] = { /* PHY_GBIT_FEATURES */ .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { /* Vitesse 8221 */ .phy_id = PHY_ID_VSC8221, @@ -470,8 +489,8 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8221", /* PHY_GBIT_FEATURES */ .config_init = &vsc8221_config_init, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, }, { /* Vitesse 8211 */ .phy_id = PHY_ID_VSC8211, @@ -479,8 +498,8 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8211", /* PHY_GBIT_FEATURES */ .config_init = &vsc8221_config_init, - .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, + .handle_interrupt = &vsc82xx_handle_interrupt, } }; module_phy_driver(vsc82xx_driver); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 7d005896a0f9..09c27f7773f9 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -174,7 +174,8 @@ struct channel { struct ppp *ppp; /* ppp unit we're connected to */ struct net *chan_net; /* the net channel belongs to */ struct list_head clist; /* link in list of 
channels per unit */ - rwlock_t upl; /* protects `ppp' */ + rwlock_t upl; /* protects `ppp' and 'bridge' */ + struct channel __rcu *bridge; /* "bridged" ppp channel */ #ifdef CONFIG_PPP_MULTILINK u8 avail; /* flag used in multilink stuff */ u8 had_frag; /* >= 1 fragments have been sent */ @@ -606,6 +607,83 @@ static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p) #endif #endif +/* Bridge one PPP channel to another. + * When two channels are bridged, ppp_input on one channel is redirected to + * the other's ops->start_xmit handler. + * In order to safely bridge channels we must reject channels which are already + * part of a bridge instance, or which form part of an existing unit. + * Once successfully bridged, each channel holds a reference on the other + * to prevent it being freed while the bridge is extant. + */ +static int ppp_bridge_channels(struct channel *pch, struct channel *pchb) +{ + write_lock_bh(&pch->upl); + if (pch->ppp || + rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) { + write_unlock_bh(&pch->upl); + return -EALREADY; + } + rcu_assign_pointer(pch->bridge, pchb); + write_unlock_bh(&pch->upl); + + write_lock_bh(&pchb->upl); + if (pchb->ppp || + rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl))) { + write_unlock_bh(&pchb->upl); + goto err_unset; + } + rcu_assign_pointer(pchb->bridge, pch); + write_unlock_bh(&pchb->upl); + + refcount_inc(&pch->file.refcnt); + refcount_inc(&pchb->file.refcnt); + + return 0; + +err_unset: + write_lock_bh(&pch->upl); + RCU_INIT_POINTER(pch->bridge, NULL); + write_unlock_bh(&pch->upl); + synchronize_rcu(); + return -EALREADY; +} + +static int ppp_unbridge_channels(struct channel *pch) +{ + struct channel *pchb, *pchbb; + + write_lock_bh(&pch->upl); + pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl)); + if (!pchb) { + write_unlock_bh(&pch->upl); + return -EINVAL; + } + RCU_INIT_POINTER(pch->bridge, NULL); + write_unlock_bh(&pch->upl); + + /* Only modify pchb if pchb->bridge points back to pch. + * If not, it implies that there has been a race unbridging (and possibly + * even rebridging) pchb. We should leave pchb alone to avoid either a + * refcount underflow, or breaking another established bridge instance. + */ + write_lock_bh(&pchb->upl); + pchbb = rcu_dereference_protected(pchb->bridge, lockdep_is_held(&pchb->upl)); + if (pchbb == pch) + RCU_INIT_POINTER(pchb->bridge, NULL); + write_unlock_bh(&pchb->upl); + + synchronize_rcu(); + + if (pchbb == pch) + if (refcount_dec_and_test(&pch->file.refcnt)) + ppp_destroy_channel(pch); + + if (refcount_dec_and_test(&pchb->file.refcnt)) + ppp_destroy_channel(pchb); + + return 0; +} + static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ppp_file *pf; @@ -641,8 +719,9 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } if (pf->kind == CHANNEL) { - struct channel *pch; + struct channel *pch, *pchb; struct ppp_channel *chan; + struct ppp_net *pn; pch = PF_TO_CHANNEL(pf); @@ -657,6 +736,31 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = ppp_disconnect_channel(pch); break; + case PPPIOCBRIDGECHAN: + if (get_user(unit, p)) + break; + err = -ENXIO; + pn = ppp_pernet(current->nsproxy->net_ns); + spin_lock_bh(&pn->all_channels_lock); + pchb = ppp_find_channel(pn, unit); + /* Hold a reference to prevent pchb being freed while + * we establish the bridge.
+ */ + if (pchb) + refcount_inc(&pchb->file.refcnt); + spin_unlock_bh(&pn->all_channels_lock); + if (!pchb) + break; + err = ppp_bridge_channels(pch, pchb); + /* Drop the earlier refcount now that bridge establishment is complete */ + if (refcount_dec_and_test(&pchb->file.refcnt)) + ppp_destroy_channel(pchb); + break; + + case PPPIOCUNBRIDGECHAN: + err = ppp_unbridge_channels(pch); + break; + default: down_read(&pch->chan_sem); chan = pch->chan; @@ -2089,6 +2193,40 @@ static bool ppp_decompress_proto(struct sk_buff *skb) return pskb_may_pull(skb, 2); } +/* Attempt to handle a frame via a bridged channel, if one exists. + * If the channel is bridged, the frame is consumed by the bridge. + * If not, the caller must handle the frame by normal recv mechanisms. + * Returns true if the frame is consumed, false otherwise. + */ +static bool ppp_channel_bridge_input(struct channel *pch, struct sk_buff *skb) +{ + struct channel *pchb; + + rcu_read_lock(); + pchb = rcu_dereference(pch->bridge); + if (!pchb) + goto out_rcu; + + spin_lock(&pchb->downl); + if (!pchb->chan) { + /* channel got unregistered */ + kfree_skb(skb); + goto outl; + } + + skb_scrub_packet(skb, !net_eq(pch->chan_net, pchb->chan_net)); + if (!pchb->chan->ops->start_xmit(pchb->chan, skb)) + kfree_skb(skb); + +outl: + spin_unlock(&pchb->downl); +out_rcu: + rcu_read_unlock(); + + /* If pchb is set then we've consumed the packet */ + return !!pchb; +} + void ppp_input(struct ppp_channel *chan, struct sk_buff *skb) { @@ -2100,6 +2238,10 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb) return; } + /* If the channel is bridged, transmit via the bridge */ + if (ppp_channel_bridge_input(pch, skb)) + return; + read_lock_bh(&pch->upl); if (!ppp_decompress_proto(skb)) { kfree_skb(skb); @@ -2796,8 +2938,11 @@ ppp_unregister_channel(struct ppp_channel *chan) list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); + ppp_unbridge_channels(pch); + pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); + if (refcount_dec_and_test(&pch->file.refcnt)) ppp_destroy_channel(pch); } @@ -3270,7 +3415,8 @@ ppp_connect_channel(struct channel *pch, int unit) goto out; write_lock_bh(&pch->upl); ret = -EINVAL; - if (pch->ppp) + if (pch->ppp || + rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl))) goto outl; ppp_lock(ppp); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 07f1f3933927..c19dac21c468 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -4,6 +4,7 @@ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com> */ +#include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> @@ -975,11 +976,11 @@ static void team_port_disable(struct team *team, } #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ + NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ NETIF_F_HIGHDMA | NETIF_F_LRO) #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ - NETIF_F_RXCSUM | NETIF_F_ALL_TSO) + NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) static void __team_compute_features(struct team *team) { @@ -1009,8 +1010,7 @@ static void __team_compute_features(struct team *team) team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX | - NETIF_F_GSO_UDP_L4; + NETIF_F_HW_VLAN_STAG_TX; team->dev->hard_header_len = max_hard_header_len; team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -2175,7 +2175,7 @@ static void team_setup(struct net_device
*dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; + dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 3160443ef3b9..ae83d66195a5 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1241,7 +1241,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN; net->handler.uuid = &tbnet_svc_uuid; - net->handler.callback = tbnet_handle_packet, + net->handler.callback = tbnet_handle_packet; net->handler.data = net; tb_register_protocol_handler(&net->handler); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index cd06cae76035..fbed05ae7b0f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -107,17 +107,6 @@ struct tap_filter { #define TUN_FLOW_EXPIRE (3 * HZ) -struct tun_pcpu_stats { - u64_stats_t rx_packets; - u64_stats_t rx_bytes; - u64_stats_t tx_packets; - u64_stats_t tx_bytes; - struct u64_stats_sync syncp; - u32 rx_dropped; - u32 tx_dropped; - u32 rx_frame_errors; -}; - /* A tun_file connects an open character device to a tuntap netdevice. It * also contains all socket related structures (except sock_fprog and tap_filter) * to serve as one transmit queue for tuntap device. The sock_fprog and @@ -207,7 +196,7 @@ struct tun_struct { void *security; u32 flow_count; u32 rx_batched; - struct tun_pcpu_stats __percpu *pcpu_stats; + atomic_long_t rx_frame_errors; struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; @@ -791,7 +780,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, } else { /* Setup XDP RX-queue info, for new tfile getting attached */ err = xdp_rxq_info_reg(&tfile->xdp_rxq, - tun->dev, tfile->queue_index); + tun->dev, tfile->queue_index, 0); if (err < 0) goto out; err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, @@ -1066,7 +1055,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; drop: - this_cpu_inc(tun->pcpu_stats->tx_dropped); + atomic_long_inc(&dev->tx_dropped); skb_tx_error(skb); kfree_skb(skb); rcu_read_unlock(); @@ -1103,37 +1092,12 @@ static void tun_set_headroom(struct net_device *dev, int new_hr) static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { - u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0; struct tun_struct *tun = netdev_priv(dev); - struct tun_pcpu_stats *p; - int i; - - for_each_possible_cpu(i) { - u64 rxpackets, rxbytes, txpackets, txbytes; - unsigned int start; - - p = per_cpu_ptr(tun->pcpu_stats, i); - do { - start = u64_stats_fetch_begin(&p->syncp); - rxpackets = u64_stats_read(&p->rx_packets); - rxbytes = u64_stats_read(&p->rx_bytes); - txpackets = u64_stats_read(&p->tx_packets); - txbytes = u64_stats_read(&p->tx_bytes); - } while (u64_stats_fetch_retry(&p->syncp, start)); - stats->rx_packets += rxpackets; - stats->rx_bytes += rxbytes; - stats->tx_packets += txpackets; - stats->tx_bytes += txbytes; + dev_get_tstats64(dev, stats); - /* u32 counters */ - rx_dropped += p->rx_dropped; - rx_frame_errors += p->rx_frame_errors; - tx_dropped += p->tx_dropped; - } - stats->rx_dropped = rx_dropped; - stats->rx_frame_errors = rx_frame_errors; - stats->tx_dropped = tx_dropped; + stats->rx_frame_errors += + (unsigned long)atomic_long_read(&tun->rx_frame_errors); } 
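The tun conversion above, together with the usbnet, qmi_wwan and per-driver .ndo_get_stats64 changes later in this diff, moves drivers onto the core-maintained per-CPU tstats counters. The following is a minimal sketch of that pattern for orientation only; it is not part of any patch in this series, and the foo_* names are invented:

static int foo_init(struct net_device *dev)
{
	/* one struct pcpu_sw_netstats per CPU; summed on demand by the core */
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the skb to the lower layer here ... */
	dev_sw_netstats_tx_add(dev, 1, len);	/* lockless per-CPU update */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_init		= foo_init,
	.ndo_uninit		= foo_uninit,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_get_stats64	= dev_get_tstats64,	/* core helper */
};

On receive, dev_sw_netstats_rx_add(dev, len) feeds the same counters; fields with no tstats equivalent, such as tun's rx_frame_errors, stay in driver-private atomics as in the hunk above.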
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, @@ -1247,7 +1211,7 @@ resample: void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { - this_cpu_inc(tun->pcpu_stats->tx_dropped); + atomic_long_inc(&dev->tx_dropped); xdp_return_frame_rx_napi(xdp); drops++; } @@ -1283,7 +1247,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_select_queue = tun_select_queue, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, - .ndo_get_stats64 = tun_net_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, .ndo_change_carrier = tun_net_change_carrier, @@ -1577,7 +1541,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); break; } @@ -1683,7 +1647,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, size_t total_len = iov_iter_count(from); size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; - struct tun_pcpu_stats *stats; int good_linear; int copylen; bool zerocopy = false; @@ -1752,7 +1715,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, */ skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); if (IS_ERR(skb)) { - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); return PTR_ERR(skb); } if (!skb) @@ -1781,7 +1744,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EAGAIN) - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); if (frags) mutex_unlock(&tfile->napi_mutex); return PTR_ERR(skb); @@ -1795,7 +1758,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (err) { err = -EFAULT; drop: - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); kfree_skb(skb); if (frags) { tfile->napi.skb = NULL; @@ -1807,7 +1770,7 @@ drop: } if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { - this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); if (frags) { tfile->napi.skb = NULL; @@ -1830,7 +1793,7 @@ drop: pi.proto = htons(ETH_P_IPV6); break; default: - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); kfree_skb(skb); return -EINVAL; } @@ -1910,7 +1873,7 @@ drop: skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { - this_cpu_inc(tun->pcpu_stats->rx_dropped); + atomic_long_inc(&tun->dev->rx_dropped); napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); @@ -1942,12 +1905,9 @@ drop: } rcu_read_unlock(); - stats = get_cpu_ptr(tun->pcpu_stats); - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_packets); - u64_stats_add(&stats->rx_bytes, len); - u64_stats_update_end(&stats->syncp); - put_cpu_ptr(stats); + preempt_disable(); + dev_sw_netstats_rx_add(tun->dev, len); + preempt_enable(); if (rxhash) tun_flow_update(tun, rxhash, tfile); @@ -1982,7 +1942,6 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun, { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; - struct tun_pcpu_stats *stats; size_t ret; if (tun->flags & IFF_VNET_HDR) { @@ -1999,12 +1958,9 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun, ret = 
copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; - stats = get_cpu_ptr(tun->pcpu_stats); - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_packets); - u64_stats_add(&stats->tx_bytes, ret); - u64_stats_update_end(&stats->syncp); - put_cpu_ptr(tun->pcpu_stats); + preempt_disable(); + dev_sw_netstats_tx_add(tun->dev, 1, ret); + preempt_enable(); return ret; } @@ -2016,7 +1972,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; - struct tun_pcpu_stats *stats; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; @@ -2094,12 +2049,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, done: /* caller is in process context, */ - stats = get_cpu_ptr(tun->pcpu_stats); - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_packets); - u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen); - u64_stats_update_end(&stats->syncp); - put_cpu_ptr(tun->pcpu_stats); + preempt_disable(); + dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); + preempt_enable(); return total; } @@ -2243,11 +2195,11 @@ static void tun_free_netdev(struct net_device *dev) BUG_ON(!(list_empty(&tun->disabled))); - free_percpu(tun->pcpu_stats); - /* We clear pcpu_stats so that tun_set_iff() can tell if + free_percpu(dev->tstats); + /* We clear tstats so that tun_set_iff() can tell if * tun_free_netdev() has been called from register_netdevice(). */ - tun->pcpu_stats = NULL; + dev->tstats = NULL; tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); @@ -2378,7 +2330,6 @@ static int tun_xdp_one(struct tun_struct *tun, unsigned int datasize = xdp->data_end - xdp->data; struct tun_xdp_hdr *hdr = xdp->data_hard_start; struct virtio_net_hdr *gso = &hdr->gso; - struct tun_pcpu_stats *stats; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; u32 rxhash = 0, act; @@ -2436,7 +2387,7 @@ build: skb_put(skb, xdp->data_end - xdp->data); if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { - this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); err = -EINVAL; goto out; @@ -2459,14 +2410,10 @@ build: netif_receive_skb(skb); - /* No need for get_cpu_ptr() here since this function is + /* No need to disable preemption here since this function is * always called with bh disabled */ - stats = this_cpu_ptr(tun->pcpu_stats); - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_packets); - u64_stats_add(&stats->rx_bytes, datasize); - u64_stats_update_end(&stats->syncp); + dev_sw_netstats_rx_add(tun->dev, datasize); if (rxhash) tun_flow_update(tun, rxhash, tfile); @@ -2759,8 +2706,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); - if (!tun->pcpu_stats) { + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) { err = -ENOMEM; goto err_free_dev; } @@ -2815,16 +2762,16 @@ err_detach: tun_detach_all(dev); /* We are here because register_netdevice() has failed. * If register_netdevice() already called tun_free_netdev() - * while dealing with the error, tun->pcpu_stats has been cleared. + * while dealing with the error, dev->tstats has been cleared.
*/ - if (!tun->pcpu_stats) + if (!dev->tstats) goto err_free_dev; err_free_flow: tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); err_free_stat: - free_percpu(tun->pcpu_stats); + free_percpu(dev->tstats); err_free_dev: free_netdev(dev); return err; @@ -3132,10 +3079,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { + ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, + tun->dev); + ret = notifier_to_errno(ret); + if (ret) { + netif_info(tun, drv, tun->dev, + "Refused to change device type\n"); + break; + } tun->dev->type = (int) arg; netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); - ret = 0; + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, + tun->dev); } break; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index b46993d5f997..1e3719028780 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -628,4 +628,13 @@ config USB_NET_AQC111 This driver should work with at least the following devices: * Aquantia AQtion USB to 5GbE +config USB_RTL8153_ECM + tristate "RTL8153 ECM support" + depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n) + default y + help + This option supports ECM mode for RTL8153 ethernet adapter, when + CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not + supported by r8152 driver. + endif # USB_NET_DRIVERS diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 99fd12be2111..4964f7b326fb 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -41,3 +41,4 @@ obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o obj-$(CONFIG_USB_NET_CH9200) += ch9200.o obj-$(CONFIG_USB_NET_AQC111) += aqc111.o +obj-$(CONFIG_USB_RTL8153_ECM) += r8153_ecm.o diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index 0717c18015c9..73b97f4cc1ec 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -641,7 +641,7 @@ static const struct net_device_ops aqc111_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = aqc111_change_mtu, .ndo_set_mac_address = aqc111_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index ef548beba684..6e13d8165852 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -194,7 +194,7 @@ static const struct net_device_ops ax88172_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = asix_ioctl, @@ -580,7 +580,7 @@ static const struct net_device_ops ax88772_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = asix_ioctl, @@ -1050,7 +1050,7 @@ static const struct net_device_ops ax88178_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + 
.ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = asix_set_multicast, diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index fd3a04d98dc1..b404c9462dce 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -120,7 +120,7 @@ static const struct net_device_ops ax88172a_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = phy_do_ioctl_running, diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 5541f3faedbc..d650b39b6e5d 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1031,7 +1031,7 @@ static const struct net_device_ops ax88179_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = ax88179_change_mtu, .ndo_set_mac_address = ax88179_set_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index dba847f28096..02e6bbb17b15 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -275,7 +275,7 @@ static const struct net_device_ops usbpn_ops = { static void usbpn_setup(struct net_device *dev) { dev->features = 0; - dev->netdev_ops = &usbpn_ops, + dev->netdev_ops = &usbpn_ops; dev->header_ops = &phonet_header_ops; dev->type = ARPHRD_PHONET; dev->flags = IFF_POINTOPOINT | IFF_NOARP; diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index eb100eb33de3..5db66272fc82 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -98,7 +98,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index e04f588538cc..2bac57d5e8d5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -793,7 +793,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_set_rx_mode = usbnet_set_rx_mode, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -1317,7 +1317,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) break; } - /* calculate frame number withing this NDP */ + /* calculate frame number within this NDP */ if (ctx->is_ndp16) { ndplen = le16_to_cpu(ndp.ndp16->wLength); index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1; diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 915ac75b55fc..b5d2ac55a874 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -343,7 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = 
usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = dm9601_ioctl, .ndo_set_rx_mode = dm9601_set_multicast, diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c index cb5bc1a7fa5a..ed05f992c612 100644 --- a/drivers/net/usb/int51x1.c +++ b/drivers/net/usb/int51x1.c @@ -133,7 +133,7 @@ static const struct net_device_ops int51x1_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = int51x1_set_multicast, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 65b315bc60ab..bf243edeb064 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -822,20 +822,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; - int ret; u32 buf; unsigned long timeout; - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { usleep_range(1, 10); - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN"); @@ -845,18 +844,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, } for (i = 0; i < length; i++) { - ret = lan78xx_write_reg(dev, OTP_ADDR1, + lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); - ret = lan78xx_write_reg(dev, OTP_ADDR2, + lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); - ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); - ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_STATUS"); @@ -864,7 +863,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, } } while (buf & OTP_STATUS_BUSY_); - ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf); + lan78xx_read_reg(dev, OTP_RD_DATA, &buf); data[i] = (u8)(buf & 0xFF); } @@ -876,20 +875,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; - int ret; u32 buf; unsigned long timeout; - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ - ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0); + lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf); + lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN completion"); @@ -899,21 +897,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, } /* set to BYTE program mode */ - ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); + lan78xx_write_reg(dev, OTP_PRGM_MODE, 
OTP_PRGM_MODE_BYTE_); for (i = 0; i < length; i++) { - ret = lan78xx_write_reg(dev, OTP_ADDR1, + lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); - ret = lan78xx_write_reg(dev, OTP_ADDR2, + lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); - ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); - ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); - ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); + lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); + lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); + lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); - ret = lan78xx_read_reg(dev, OTP_STATUS, &buf); + lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "Timeout on OTP_STATUS completion"); @@ -1038,7 +1036,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) container_of(param, struct lan78xx_priv, set_multicast); struct lan78xx_net *dev = pdata->dev; int i; - int ret; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); @@ -1047,14 +1044,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param) DP_SEL_VHF_HASH_LEN, pdata->mchash_table); for (i = 1; i < NUM_OF_MAF; i++) { - ret = lan78xx_write_reg(dev, MAF_HI(i), 0); - ret = lan78xx_write_reg(dev, MAF_LO(i), + lan78xx_write_reg(dev, MAF_HI(i), 0); + lan78xx_write_reg(dev, MAF_LO(i), pdata->pfilter_table[i][1]); - ret = lan78xx_write_reg(dev, MAF_HI(i), + lan78xx_write_reg(dev, MAF_HI(i), pdata->pfilter_table[i][0]); } - ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); } static void lan78xx_set_multicast(struct net_device *netdev) @@ -1124,7 +1121,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; - int ret; u8 cap; if (dev->fc_autoneg) @@ -1147,10 +1143,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? 
"enabled" : "disabled")); - ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow); + lan78xx_write_reg(dev, FCT_FLOW, fct_flow); /* threshold value should be set before enabling flow */ - ret = lan78xx_write_reg(dev, FLOW, flow); + lan78xx_write_reg(dev, FLOW, flow); return 0; } @@ -1663,11 +1659,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { static void lan78xx_init_mac_address(struct lan78xx_net *dev) { u32 addr_lo, addr_hi; - int ret; u8 addr[6]; - ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); - ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); + lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); + lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); addr[0] = addr_lo & 0xFF; addr[1] = (addr_lo >> 8) & 0xFF; @@ -1700,12 +1695,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) (addr[2] << 16) | (addr[3] << 24); addr_hi = addr[4] | (addr[5] << 8); - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + lan78xx_write_reg(dev, RX_ADDRH, addr_hi); } - ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); ether_addr_copy(dev->net->dev_addr, addr); } @@ -1838,7 +1833,7 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) static void lan78xx_link_status_change(struct net_device *net) { struct phy_device *phydev = net->phydev; - int ret, temp; + int temp; /* At forced 100 F/H mode, chip may fail to set mode correctly * when cable is switched between long(~50+m) and short one. @@ -1849,7 +1844,7 @@ static void lan78xx_link_status_change(struct net_device *net) /* disable phy interrupt */ temp = phy_read(phydev, LAN88XX_INT_MASK); temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; - ret = phy_write(phydev, LAN88XX_INT_MASK, temp); + phy_write(phydev, LAN88XX_INT_MASK, temp); temp = phy_read(phydev, MII_BMCR); temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); @@ -1863,7 +1858,7 @@ static void lan78xx_link_status_change(struct net_device *net) /* enable phy interrupt back */ temp = phy_read(phydev, LAN88XX_INT_MASK); temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; - ret = phy_write(phydev, LAN88XX_INT_MASK, temp); + phy_write(phydev, LAN88XX_INT_MASK, temp); } } @@ -1917,14 +1912,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) struct lan78xx_net *dev = container_of(data, struct lan78xx_net, domain_data); u32 buf; - int ret; /* call register access here because irq_bus_lock & irq_bus_sync_unlock * are only two callbacks executed in non-atomic contex. 
*/ - ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf); + lan78xx_read_reg(dev, INT_EP_CTL, &buf); if (buf != data->irqenable) - ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); + lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); mutex_unlock(&data->irq_lock); } @@ -1991,7 +1985,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) static int lan8835_fixup(struct phy_device *phydev) { int buf; - int ret; struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ @@ -2001,11 +1994,11 @@ static int lan8835_fixup(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); /* RGMII MAC TXC Delay Enable */ - ret = lan78xx_write_reg(dev, MAC_RGMII_ID, + lan78xx_write_reg(dev, MAC_RGMII_ID, MAC_RGMII_ID_TXC_DELAY_EN_); /* RGMII TX DLL Tune Adjust */ - ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); + lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; @@ -2189,28 +2182,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) { - int ret = 0; u32 buf; bool rxenabled; - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_read_reg(dev, MAC_RX, &buf); rxenabled = ((buf & MAC_RX_RXEN_) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE_MASK_; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); if (rxenabled) { buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); } return 0; @@ -2267,13 +2259,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) int ll_mtu = new_mtu + netdev->hard_header_len; int old_hard_mtu = dev->hard_mtu; int old_rx_urb_size = dev->rx_urb_size; - int ret; /* no second zero-length packet read wanted after mtu-sized packets */ if ((ll_mtu % dev->maxpacket) == 0) return -EDOM; - ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); + lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN); netdev->mtu = new_mtu; @@ -2296,7 +2287,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) struct lan78xx_net *dev = netdev_priv(netdev); struct sockaddr *addr = p; u32 addr_lo, addr_hi; - int ret; if (netif_running(netdev)) return -EBUSY; @@ -2313,12 +2303,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) addr_hi = netdev->dev_addr[4] | netdev->dev_addr[5] << 8; - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + lan78xx_write_reg(dev, RX_ADDRH, addr_hi); /* Added to support MAC address changes */ - ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); - ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); + lan78xx_write_reg(dev, MAF_LO(0), addr_lo); + lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); return 0; } @@ -2330,7 +2320,6 @@ static int lan78xx_set_features(struct net_device *netdev, struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); unsigned long flags; - int ret; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); @@ -2354,7 +2343,7 @@ static int lan78xx_set_features(struct net_device *netdev, 
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); - ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); + lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); return 0; } @@ -3804,7 +3793,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) { u32 buf; - int ret; int mask_index; u16 crc; u32 temp_wucsr; @@ -3813,26 +3801,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) const u8 ipv6_multicast[3] = { 0x33, 0x33 }; const u8 arp_type[2] = { 0x08, 0x06 }; - ret = lan78xx_read_reg(dev, MAC_TX, &buf); + lan78xx_read_reg(dev, MAC_TX, &buf); buf &= ~MAC_TX_TXEN_; - ret = lan78xx_write_reg(dev, MAC_TX, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_write_reg(dev, MAC_TX, buf); + lan78xx_read_reg(dev, MAC_RX, &buf); buf &= ~MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); - ret = lan78xx_write_reg(dev, WUCSR, 0); - ret = lan78xx_write_reg(dev, WUCSR2, 0); - ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); + lan78xx_write_reg(dev, WUCSR, 0); + lan78xx_write_reg(dev, WUCSR2, 0); + lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); temp_wucsr = 0; temp_pmt_ctl = 0; - ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); + lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); + lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); mask_index = 0; if (wol & WAKE_PHY) { @@ -3861,30 +3849,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; /* for IPv6 Multicast */ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3905,16 +3893,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) * for packettype (offset 12,13) = ARP (0x0806) */ crc = lan78xx_wakeframe_crc16(arp_type, 2); - ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), + lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_ALL_ | (0 << 
WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); - ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); - ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); - ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); + lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); + lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; @@ -3922,7 +3910,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); + lan78xx_write_reg(dev, WUCSR, temp_wucsr); /* when multiple WOL bits are set */ if (hweight_long((unsigned long)wol) > 1) { @@ -3930,16 +3918,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } - ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); + lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); /* clear WUPS */ - ret = lan78xx_read_reg(dev, PMT_CTL, &buf); + lan78xx_read_reg(dev, PMT_CTL, &buf); buf |= PMT_CTL_WUPS_MASK_; - ret = lan78xx_write_reg(dev, PMT_CTL, buf); + lan78xx_write_reg(dev, PMT_CTL, buf); - ret = lan78xx_read_reg(dev, MAC_RX, &buf); + lan78xx_read_reg(dev, MAC_RX, &buf); buf |= MAC_RX_RXEN_; - ret = lan78xx_write_reg(dev, MAC_RX, buf); + lan78xx_write_reg(dev, MAC_RX, buf); return 0; } diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 09bfa6a4dfbc..fc512b780d15 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -462,7 +462,7 @@ static const struct net_device_ops mcs7830_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = mcs7830_ioctl, .ndo_set_rx_mode = mcs7830_set_multicast, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index fc378ff56775..d166c321ee9b 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -72,7 +72,6 @@ struct qmimux_hdr { struct qmimux_priv { struct net_device *real_dev; u8 mux_id; - struct pcpu_sw_netstats __percpu *stats64; }; static int qmimux_open(struct net_device *dev) @@ -108,34 +107,19 @@ static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev skb->dev = priv->real_dev; ret = dev_queue_xmit(skb); - if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { - struct pcpu_sw_netstats *stats64 = this_cpu_ptr(priv->stats64); - - u64_stats_update_begin(&stats64->syncp); - stats64->tx_packets++; - stats64->tx_bytes += len; - u64_stats_update_end(&stats64->syncp); - } else { + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) + dev_sw_netstats_tx_add(dev, 1, len); + else dev->stats.tx_dropped++; - } return ret; } -static void qmimux_get_stats64(struct net_device *net, - struct rtnl_link_stats64 *stats) -{ - struct qmimux_priv *priv = netdev_priv(net); - - netdev_stats_to_stats64(stats, &net->stats); - dev_fetch_sw_netstats(stats, priv->stats64); -} - static const struct net_device_ops qmimux_netdev_ops = { .ndo_open = qmimux_open, .ndo_stop = qmimux_stop, .ndo_start_xmit = qmimux_start_xmit, - .ndo_get_stats64 = qmimux_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, }; static void 
qmimux_setup(struct net_device *dev) @@ -224,14 +208,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb) net->stats.rx_errors++; return 0; } else { - struct pcpu_sw_netstats *stats64; - struct qmimux_priv *priv = netdev_priv(net); - - stats64 = this_cpu_ptr(priv->stats64); - u64_stats_update_begin(&stats64->syncp); - stats64->rx_packets++; - stats64->rx_bytes += pkt_len; - u64_stats_update_end(&stats64->syncp); + dev_sw_netstats_rx_add(net, pkt_len); } skip: @@ -256,8 +233,8 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) priv->mux_id = mux_id; priv->real_dev = real_dev; - priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!priv->stats64) { + new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!new_dev->tstats) { err = -ENOBUFS; goto out_free_newdev; } @@ -292,7 +269,7 @@ static void qmimux_unregister_device(struct net_device *dev, struct qmimux_priv *priv = netdev_priv(dev); struct net_device *real_dev = priv->real_dev; - free_percpu(priv->stats64); + free_percpu(dev->tstats); netdev_upper_dev_unlink(real_dev, dev); unregister_netdevice_queue(dev, head); @@ -598,7 +575,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = qmi_wwan_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index b1770489aca5..c448d6089821 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -26,6 +26,7 @@ #include <linux/acpi.h> #include <linux/firmware.h> #include <crypto/hash.h> +#include <linux/usb/r8152.h> /* Information for net-next */ #define NETNEXT_VERSION "11" @@ -653,18 +654,6 @@ enum rtl_register_content { #define INTR_LINK 0x0004 -#define RTL8152_REQT_READ 0xc0 -#define RTL8152_REQT_WRITE 0x40 -#define RTL8152_REQ_GET_REGS 0x05 -#define RTL8152_REQ_SET_REGS 0x05 - -#define BYTE_EN_DWORD 0xff -#define BYTE_EN_WORD 0x33 -#define BYTE_EN_BYTE 0x11 -#define BYTE_EN_SIX_BYTES 0x3f -#define BYTE_EN_START_MASK 0x0f -#define BYTE_EN_END_MASK 0xf0 - #define RTL8153_MAX_PACKET 9216 /* 9K */ #define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \ ETH_FCS_LEN) @@ -689,21 +678,9 @@ enum rtl8152_flags { LENOVO_MACPASSTHRU, }; -/* Define these values to match your device */ -#define VENDOR_ID_REALTEK 0x0bda -#define VENDOR_ID_MICROSOFT 0x045e -#define VENDOR_ID_SAMSUNG 0x04e8 -#define VENDOR_ID_LENOVO 0x17ef -#define VENDOR_ID_LINKSYS 0x13b1 -#define VENDOR_ID_NVIDIA 0x0955 -#define VENDOR_ID_TPLINK 0x2357 - #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 -#define MCU_TYPE_PLA 0x0100 -#define MCU_TYPE_USB 0x0000 - struct tally_counter { __le64 tx_packets; __le64 rx_packets; @@ -898,6 +875,7 @@ struct fw_header { * struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB. * The layout of the firmware block is: * <struct fw_mac> + <info> + <firmware data>. + * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_mac + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. @@ -911,6 +889,7 @@ struct fw_header { * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware. 
* @bp: break points. Depends on firmware. + * @reserved: reserved space (unused) * @fw_ver_reg: the register to store the fw version. * @fw_ver_data: the firmware version of the current type. * @info: additional information for debugging, and is followed by the @@ -936,8 +915,10 @@ struct fw_mac { /** * struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START. * This is used to set patch key when loading the firmware of PHY. + * @blk_hdr: firmware descriptor (type, length) * @key_reg: the register to write the patch key. * @key_data: patch key. + * @reserved: reserved space (unused) */ struct fw_phy_patch_key { struct fw_block blk_hdr; @@ -950,6 +931,7 @@ struct fw_phy_patch_key { * struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC. * The layout of the firmware block is: * <struct fw_phy_nc> + <info> + <firmware data>. + * @blk_hdr: firmware descriptor (type, length) * @fw_offset: offset of the firmware binary data. The start address of * the data would be the address of struct fw_phy_nc + @fw_offset. * @fw_reg: the register to load the firmware. Depends on chip. @@ -958,8 +940,9 @@ struct fw_phy_patch_key { * @patch_en_addr: the register of enabling patch mode. Depends on chip. * @patch_en_value: patch mode enabled mask. Depends on the firmware. * @mode_reg: the register of switching the mode. - * @mod_pre: the mode needing to be set before loading the firmware. - * @mod_post: the mode to be set when finishing to load the firmware. + * @mode_pre: the mode needing to be set before loading the firmware. + * @mode_post: the mode to be set when finishing to load the firmware. + * @reserved: reserved space (unused) * @bp_start: the start register of break points. Depends on chip. * @bp_num: the break point number which needs to be set for this firmware. * Depends on the firmware.
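The kernel-doc above describes each firmware block as a header struct followed by <info> and the firmware data, with @fw_offset giving the distance from the start of the block to the data. A hedged illustration of that layout follows, assuming only what the comment states plus fw_offset being a little-endian 16-bit field; the helper name is invented and is not part of the driver:

static const u8 *fw_mac_data(const struct fw_mac *mac)
{
	/* "The start address of the data would be the address of
	 * struct fw_mac + @fw_offset", per the kernel-doc above.
	 */
	return (const u8 *)mac + le16_to_cpu(mac->fw_offset);
}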
@@ -6615,7 +6598,7 @@ static int rtl_fw_init(struct r8152 *tp) return 0; } -static u8 rtl_get_version(struct usb_interface *intf) +u8 rtl8152_get_version(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); u32 ocp_data = 0; @@ -6673,12 +6656,13 @@ static u8 rtl_get_version(struct usb_interface *intf) return version; } +EXPORT_SYMBOL_GPL(rtl8152_get_version); static int rtl8152_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); - u8 version = rtl_get_version(intf); + u8 version = rtl8152_get_version(intf); struct r8152 *tp; struct net_device *netdev; int ret; diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c new file mode 100644 index 000000000000..2c3fabd38b16 --- /dev/null +++ b/drivers/net/usb/r8153_ecm.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/usb/cdc.h> +#include <linux/usb/usbnet.h> +#include <linux/usb/r8152.h> + +#define OCP_BASE 0xe86c + +static int pla_read_word(struct usbnet *dev, u16 index) +{ + u16 byen = BYTE_EN_WORD; + u8 shift = index & 2; + __le32 tmp; + int ret; + + if (shift) + byen <<= shift; + + index &= ~3; + + ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, + MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); + if (ret < 0) + goto out; + + ret = __le32_to_cpu(tmp); + ret >>= (shift * 8); + ret &= 0xffff; + +out: + return ret; +} + +static int pla_write_word(struct usbnet *dev, u16 index, u32 data) +{ + u32 mask = 0xffff; + u16 byen = BYTE_EN_WORD; + u8 shift = index & 2; + __le32 tmp; + int ret; + + data &= mask; + + if (shift) { + byen <<= shift; + mask <<= (shift * 8); + data <<= (shift * 8); + } + + index &= ~3; + + ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index, + MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); + + if (ret < 0) + goto out; + + data |= __le32_to_cpu(tmp) & ~mask; + tmp = __cpu_to_le32(data); + + ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index, + MCU_TYPE_PLA | byen, &tmp, sizeof(tmp)); + +out: + return ret; +} + +static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg) +{ + struct usbnet *dev = netdev_priv(netdev); + int ret; + + ret = pla_write_word(dev, OCP_BASE, 0xa000); + if (ret < 0) + goto out; + + ret = pla_read_word(dev, 0xb400 + reg * 2); + +out: + return ret; +} + +static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) +{ + struct usbnet *dev = netdev_priv(netdev); + int ret; + + ret = pla_write_word(dev, OCP_BASE, 0xa000); + if (ret < 0) + return; + + ret = pla_write_word(dev, 0xb400 + reg * 2, val); +} + +static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status; + + status = usbnet_cdc_bind(dev, intf); + if (status < 0) + return status; + + dev->mii.dev = dev->net; + dev->mii.mdio_read = r8153_ecm_mdio_read; + dev->mii.mdio_write = r8153_ecm_mdio_write; + dev->mii.reg_num_mask = 0x1f; + dev->mii.supports_gmii = 1; + + return status; +} + +static const struct driver_info r8153_info = { + .description = "RTL8153 ECM Device", + .flags = FLAG_ETHER, + .bind = r8153_bind, + .unbind = usbnet_cdc_unbind, + .status = usbnet_cdc_status, + .manage_power = usbnet_manage_power, +}; + +static const struct usb_device_id products[] = { +{ + USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, 
USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&r8153_info, +}, + + { }, /* END */ +}; +MODULE_DEVICE_TABLE(usb, products); + +static int rtl8153_ecm_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ +#if IS_REACHABLE(CONFIG_USB_RTL8152) + if (rtl8152_get_version(intf)) + return -ENODEV; +#endif + + return usbnet_probe(intf, id); +} + +static struct usb_driver r8153_ecm_driver = { + .name = "r8153_ecm", + .id_table = products, + .probe = rtl8153_ecm_probe, + .disconnect = usbnet_disconnect, + .suspend = usbnet_suspend, + .resume = usbnet_resume, + .reset_resume = usbnet_resume, + .supports_autosuspend = 1, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(r8153_ecm_driver); + +MODULE_AUTHOR("Hayes Wang"); +MODULE_DESCRIPTION("Realtek USB ECM device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 6fa7a009a24a..6609d21ef894 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -279,7 +279,7 @@ static const struct net_device_ops rndis_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 0abd257b634c..55a244eca5ca 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -184,7 +184,7 @@ static const struct net_device_ops sierra_net_device_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 8689835a5214..4353b370249f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -1435,7 +1435,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = smsc75xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index ea0d5f04dc3a..4c8ee1cff4d4 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1041,7 +1041,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = smsc95xx_ioctl, diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index e04c8054c2cf..878557ad03ad 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -308,7 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = sr9700_ioctl, .ndo_set_rx_mode = sr9700_set_multicast, diff --git 
a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c index 681e0def6356..da56735d7755 100644 --- a/drivers/net/usb/sr9800.c +++ b/drivers/net/usb/sr9800.c @@ -681,7 +681,7 @@ static const struct net_device_ops sr9800_netdev_ops = { .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = sr_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = sr_ioctl, diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6062dc27870e..1447da1d5729 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -304,7 +304,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev) */ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) { - struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); unsigned long flags; int status; @@ -980,15 +980,6 @@ int usbnet_set_link_ksettings(struct net_device *net, } EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings); -void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) -{ - struct usbnet *dev = netdev_priv(net); - - netdev_stats_to_stats64(stats, &net->stats); - dev_fetch_sw_netstats(stats, dev->stats64); -} -EXPORT_SYMBOL_GPL(usbnet_get_stats64); - u32 usbnet_get_link (struct net_device *net) { struct usbnet *dev = netdev_priv(net); @@ -1220,7 +1211,7 @@ static void tx_complete (struct urb *urb) struct usbnet *dev = entry->dev; if (urb->status == 0) { - struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); + struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats); unsigned long flags; flags = u64_stats_update_begin_irqsave(&stats64->syncp); @@ -1596,7 +1587,7 @@ void usbnet_disconnect (struct usb_interface *intf) usb_free_urb(dev->interrupt); kfree(dev->padding_pkt); - free_percpu(dev->stats64); + free_percpu(net->tstats); free_netdev(net); } EXPORT_SYMBOL_GPL(usbnet_disconnect); @@ -1608,7 +1599,7 @@ static const struct net_device_ops usbnet_netdev_ops = { .ndo_tx_timeout = usbnet_tx_timeout, .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_change_mtu = usbnet_change_mtu, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1671,8 +1662,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->driver_info = info; dev->driver_name = name; - dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->stats64) + net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!net->tstats) goto out0; dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV @@ -1812,7 +1803,7 @@ out1: */ cancel_work_sync(&dev->kevent); del_timer_sync(&dev->delay); - free_percpu(dev->stats64); + free_percpu(net->tstats); out0: free_netdev(net); out: diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8c737668008a..02bfcdf50a7a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -884,7 +884,6 @@ static int veth_napi_add(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) { struct veth_rq *rq = &priv->rq[i]; - netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); napi_enable(&rq->xdp_napi); } @@ -926,7 +925,8 @@ static int veth_enable_xdp(struct net_device *dev) for (i = 0; i < dev->real_num_rx_queues; i++) { struct veth_rq *rq = &priv->rq[i]; - err = 
xdp_rxq_info_reg(&rq->xdp_rxq, dev, i); + netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT); + err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id); if (err < 0) goto err_rxq_reg; @@ -952,8 +952,12 @@ static int veth_enable_xdp(struct net_device *dev) err_reg_mem: xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); err_rxq_reg: - for (i--; i >= 0; i--) - xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq); + for (i--; i >= 0; i--) { + struct veth_rq *rq = &priv->rq[i]; + + xdp_rxq_info_unreg(&rq->xdp_rxq); + netif_napi_del(&rq->xdp_napi); + } return err; } @@ -1329,7 +1333,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, } if (ifmp && tbp[IFLA_IFNAME]) { - nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); @@ -1379,7 +1383,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, eth_hw_addr_random(dev); if (tb[IFLA_IFNAME]) - nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 21b71148c532..052975ea0af4 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1485,7 +1485,7 @@ static int virtnet_open(struct net_device *dev) if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); - err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); + err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id); if (err < 0) return err; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index b9b7e00b72a8..6d9130859c55 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -9,6 +9,7 @@ * Based on dummy, team and ipvlan drivers */ +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> @@ -1236,6 +1237,61 @@ static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, return skb; } +static int vrf_prepare_mac_header(struct sk_buff *skb, + struct net_device *vrf_dev, u16 proto) +{ + struct ethhdr *eth; + int err; + + /* in general, we do not know if there is enough space in the head of + * the packet for hosting the mac header. + */ + err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev)); + if (unlikely(err)) + /* no space in the skb head */ + return -ENOBUFS; + + __skb_push(skb, ETH_HLEN); + eth = (struct ethhdr *)skb->data; + + skb_reset_mac_header(skb); + + /* we set the ethernet destination and the source addresses to the + * address of the VRF device. + */ + ether_addr_copy(eth->h_dest, vrf_dev->dev_addr); + ether_addr_copy(eth->h_source, vrf_dev->dev_addr); + eth->h_proto = htons(proto); + + /* the destination address of the Ethernet frame corresponds to the + * address set on the VRF interface; therefore, the packet is intended + * to be processed locally. + */ + skb->protocol = eth->h_proto; + skb->pkt_type = PACKET_HOST; + + skb_postpush_rcsum(skb, skb->data, ETH_HLEN); + + skb_pull_inline(skb, ETH_HLEN); + + return 0; +} + +/* prepare and add the mac header to the packet if it was not set previously. + * In this way, packet sniffers such as tcpdump can parse the packet correctly. + * If the mac header was already set, the original mac header is left + * untouched and the function returns immediately. 
+ */ +static int vrf_add_mac_header_if_unset(struct sk_buff *skb, + struct net_device *vrf_dev, + u16 proto) +{ + if (skb_mac_header_was_set(skb)) + return 0; + + return vrf_prepare_mac_header(skb, vrf_dev, proto); +} + #if IS_ENABLED(CONFIG_IPV6) /* neighbor handling is done with actual device; do not want * to flip skb->dev for those ndisc packets. This really fails @@ -1341,9 +1397,15 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, skb->skb_iif = vrf_dev->ifindex; if (!list_empty(&vrf_dev->ptype_all)) { - skb_push(skb, skb->mac_len); - dev_queue_xmit_nit(skb, vrf_dev); - skb_pull(skb, skb->mac_len); + int err; + + err = vrf_add_mac_header_if_unset(skb, vrf_dev, + ETH_P_IPV6); + if (likely(!err)) { + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + } } IP6CB(skb)->flags |= IP6SKB_L3SLAVE; @@ -1386,9 +1448,14 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, vrf_rx_stats(vrf_dev, skb->len); if (!list_empty(&vrf_dev->ptype_all)) { - skb_push(skb, skb->mac_len); - dev_queue_xmit_nit(skb, vrf_dev); - skb_pull(skb, skb->mac_len); + int err; + + err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP); + if (likely(!err)) { + skb_push(skb, skb->mac_len); + dev_queue_xmit_nit(skb, vrf_dev); + skb_pull(skb, skb->mac_len); + } } skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index e8563acf98e8..b1bb1b04b664 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/ethtool.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/if_arp.h> diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 977f77e2c2ce..a8ad710629e6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -66,6 +66,7 @@ struct vxlan_net { struct list_head vxlan_list; struct hlist_head sock_list[PORT_HASH_SIZE]; spinlock_t sock_lock; + struct notifier_block nexthop_notifier_block; }; /* Forwarding table entry */ @@ -2476,7 +2477,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk, &fl6, NULL); - if (unlikely(IS_ERR(ndst))) { + if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", daddr); return ERR_PTR(-ENETUNREACH); } @@ -3210,7 +3211,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_open = vxlan_open, .ndo_stop = vxlan_stop, .ndo_start_xmit = vxlan_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_rx_mode = vxlan_set_multicast_list, .ndo_change_mtu = vxlan_change_mtu, .ndo_validate_addr = eth_validate_addr, @@ -3229,7 +3230,7 @@ static const struct net_device_ops vxlan_netdev_raw_ops = { .ndo_open = vxlan_open, .ndo_stop = vxlan_stop, .ndo_start_xmit = vxlan_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_change_mtu = vxlan_change_mtu, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; @@ -4688,9 +4689,14 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh) static int vxlan_nexthop_event(struct notifier_block *nb, unsigned long event, void *ptr) { - struct nexthop *nh = ptr; + struct nh_notifier_info *info = ptr; + struct nexthop *nh; + + if (event != NEXTHOP_EVENT_DEL) + return NOTIFY_DONE; - if (!nh || event != NEXTHOP_EVENT_DEL) + nh = nexthop_find_by_id(info->net, info->id); + if (!nh) return NOTIFY_DONE; vxlan_fdb_nh_flush(nh); @@ -4698,10 +4704,6 
@@ static int vxlan_nexthop_event(struct notifier_block *nb, return NOTIFY_DONE; } -static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = { - .notifier_call = vxlan_nexthop_event, -}; - static __net_init int vxlan_init_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); @@ -4709,11 +4711,13 @@ static __net_init int vxlan_init_net(struct net *net) INIT_LIST_HEAD(&vn->vxlan_list); spin_lock_init(&vn->sock_lock); + vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event; for (h = 0; h < PORT_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vn->sock_list[h]); - return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block); + return register_nexthop_notifier(net, &vn->nexthop_notifier_block, + NULL); } static void vxlan_destroy_tunnels(struct net *net, struct list_head *head) @@ -4745,8 +4749,11 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list) LIST_HEAD(list); rtnl_lock(); - list_for_each_entry(net, net_list, exit_list) - unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block); + list_for_each_entry(net, net_list, exit_list) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); + + unregister_nexthop_notifier(net, &vn->nexthop_notifier_block); + } list_for_each_entry(net, net_list, exit_list) vxlan_destroy_tunnels(net, &list); diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 39e5ab261d7c..4029fde71a9e 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -321,51 +321,6 @@ config IXP4XX_HSS Say Y here if you want to use built-in HSS ports on IXP4xx processor. -config DLCI - tristate "Frame Relay DLCI support" - help - Support for the Frame Relay protocol. - - Frame Relay is a fast low-cost way to connect to a remote Internet - access provider or to form a private wide area network. The one - physical line from your box to the local "switch" (i.e. the entry - point to the Frame Relay network, usually at the phone company) can - carry several logical point-to-point connections to other computers - connected to the Frame Relay network. For a general explanation of - the protocol, check out <http://www.mplsforum.org/>. - - To use frame relay, you need supporting hardware (called FRAD) and - certain programs from the net-tools package as explained in - <file:Documentation/networking/framerelay.rst>. - - To compile this driver as a module, choose M here: the - module will be called dlci. - -config DLCI_MAX - int "Max DLCI per device" - depends on DLCI - default "8" - help - How many logical point-to-point frame relay connections (the - identifiers of which are called DLCIs) should be handled by each - of your hardware frame relay access devices. - - Go with the default. - -config SDLA - tristate "SDLA (Sangoma S502/S508) support" - depends on DLCI && ISA - help - Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access - Devices. - - These are multi-protocol cards, but only Frame Relay is supported - by the driver at this time. Please read - <file:Documentation/networking/framerelay.rst>. - - To compile this driver as a module, choose M here: the - module will be called sdla. - # X.25 network drivers config LAPBETHER tristate "LAPB over Ethernet driver" @@ -383,21 +338,6 @@ config LAPBETHER If unsure, say N. -config X25_ASY - tristate "X.25 async driver" - depends on LAPB && X25 && TTY - help - Send and receive X.25 frames over regular asynchronous serial - lines such as telephone lines equipped with ordinary modems.
- - Experts should note that this driver doesn't currently comply with - the asynchronous HDLC framing protocols in CCITT recommendation X.25. - - To compile this driver as a module, choose M here: the - module will be called x25_asy. - - If unsure, say N. - config SBNI tristate "Granch SBNI12 Leased Line adapter support" depends on X86 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index 380271a011e4..081666c36ca2 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -18,12 +18,9 @@ obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o obj-$(CONFIG_COSA) += cosa.o obj-$(CONFIG_FARSYNC) += farsync.o -obj-$(CONFIG_X25_ASY) += x25_asy.o obj-$(CONFIG_LANMEDIA) += lmc/ -obj-$(CONFIG_DLCI) += dlci.o -obj-$(CONFIG_SDLA) += sdla.o obj-$(CONFIG_LAPBETHER) += lapbether.o obj-$(CONFIG_SBNI) += sbni.o obj-$(CONFIG_N2) += n2.o diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c deleted file mode 100644 index 3ca4daf63389..000000000000 --- a/drivers/net/wan/dlci.c +++ /dev/null @@ -1,541 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * DLCI Implementation of Frame Relay protocol for Linux, according to - * RFC 1490. This generic device provides en/decapsulation for an - * underlying hardware driver. Routes & IPs are assigned to these - * interfaces. Requires 'dlcicfg' program to create usable - * interfaces, the initial one, 'dlci' is for IOCTL use only. - * - * Version: @(#)dlci.c 0.35 4 Jan 1997 - * - * Author: Mike McLagan <mike.mclagan@linux.org> - * - * Changes: - * - * 0.15 Mike McLagan Packet freeing, bug in kmalloc call - * DLCI_RET handling - * 0.20 Mike McLagan More conservative on which packets - * are returned for retry and which are - dropped.
If DLCI_RET_DROP is - * returned from the FRAD, the packet is - * sent back to Linux for re-transmission - * 0.25 Mike McLagan Converted to use SIOC IOCTL calls - * 0.30 Jim Freeman Fixed to allow IPX traffic - * 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/if_frad.h> -#include <linux/bitops.h> - -#include <net/sock.h> - -#include <asm/io.h> -#include <asm/dma.h> -#include <linux/uaccess.h> - -static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org"; - -static LIST_HEAD(dlci_devs); - -static void dlci_setup(struct net_device *); - -/* - * these encapsulate the RFC 1490 requirements as well as - * deal with packet transmission and reception, working with - * the upper network layers - */ - -static int dlci_header(struct sk_buff *skb, struct net_device *dev, - unsigned short type, const void *daddr, - const void *saddr, unsigned len) -{ - struct frhdr hdr; - unsigned int hlen; - char *dest; - - hdr.control = FRAD_I_UI; - switch (type) - { - case ETH_P_IP: - hdr.IP_NLPID = FRAD_P_IP; - hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID); - break; - - /* feel free to add other types, if necessary */ - - default: - hdr.pad = FRAD_P_PADDING; - hdr.NLPID = FRAD_P_SNAP; - memset(hdr.OUI, 0, sizeof(hdr.OUI)); - hdr.PID = htons(type); - hlen = sizeof(hdr); - break; - } - - dest = skb_push(skb, hlen); - if (!dest) - return 0; - - memcpy(dest, &hdr, hlen); - - return hlen; -} - -static void dlci_receive(struct sk_buff *skb, struct net_device *dev) -{ - struct frhdr *hdr; - int process, header; - - if (!pskb_may_pull(skb, sizeof(*hdr))) { - netdev_notice(dev, "invalid data no header\n"); - dev->stats.rx_errors++; - kfree_skb(skb); - return; - } - - hdr = (struct frhdr *) skb->data; - process = 0; - header = 0; - skb->dev = dev; - - if (hdr->control != FRAD_I_UI) - { - netdev_notice(dev, "Invalid header flag 0x%02X\n", - hdr->control); - dev->stats.rx_errors++; - } - else - switch (hdr->IP_NLPID) - { - case FRAD_P_PADDING: - if (hdr->NLPID != FRAD_P_SNAP) - { - netdev_notice(dev, "Unsupported NLPID 0x%02X\n", - hdr->NLPID); - dev->stats.rx_errors++; - break; - } - - if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0) - { - netdev_notice(dev, "Unsupported organizationally unique identifier 0x%02X-%02X-%02X\n", - hdr->OUI[0], - hdr->OUI[1], - hdr->OUI[2]); - dev->stats.rx_errors++; - break; - } - - /* at this point, it's an EtherType frame */ - header = sizeof(struct frhdr); - /* Already in network order ! 
*/ - skb->protocol = hdr->PID; - process = 1; - break; - - case FRAD_P_IP: - header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID); - skb->protocol = htons(ETH_P_IP); - process = 1; - break; - - case FRAD_P_SNAP: - case FRAD_P_Q933: - case FRAD_P_CLNP: - netdev_notice(dev, "Unsupported NLPID 0x%02X\n", - hdr->pad); - dev->stats.rx_errors++; - break; - - default: - netdev_notice(dev, "Invalid pad byte 0x%02X\n", - hdr->pad); - dev->stats.rx_errors++; - break; - } - - if (process) - { - /* we've set up the protocol, so discard the header */ - skb_reset_mac_header(skb); - skb_pull(skb, header); - dev->stats.rx_bytes += skb->len; - netif_rx(skb); - dev->stats.rx_packets++; - } - else - dev_kfree_skb(skb); -} - -static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev) -{ - struct dlci_local *dlp = netdev_priv(dev); - - if (skb) { - struct netdev_queue *txq = skb_get_tx_queue(dev, skb); - netdev_start_xmit(skb, dlp->slave, txq, false); - } - return NETDEV_TX_OK; -} - -static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get) -{ - struct dlci_conf config; - struct dlci_local *dlp; - struct frad_local *flp; - int err; - - dlp = netdev_priv(dev); - - flp = netdev_priv(dlp->slave); - - if (!get) - { - if (copy_from_user(&config, conf, sizeof(struct dlci_conf))) - return -EFAULT; - if (config.flags & ~DLCI_VALID_FLAGS) - return -EINVAL; - memcpy(&dlp->config, &config, sizeof(struct dlci_conf)); - dlp->configured = 1; - } - - err = (*flp->dlci_conf)(dlp->slave, dev, get); - if (err) - return err; - - if (get) - { - if (copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf))) - return -EFAULT; - } - - return 0; -} - -static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct dlci_local *dlp; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - dlp = netdev_priv(dev); - - switch (cmd) - { - case DLCI_GET_SLAVE: - if (!*(short *)(dev->dev_addr)) - return -EINVAL; - - strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave)); - break; - - case DLCI_GET_CONF: - case DLCI_SET_CONF: - if (!*(short *)(dev->dev_addr)) - return -EINVAL; - - return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF); - - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int dlci_change_mtu(struct net_device *dev, int new_mtu) -{ - struct dlci_local *dlp = netdev_priv(dev); - - return dev_set_mtu(dlp->slave, new_mtu); -} - -static int dlci_open(struct net_device *dev) -{ - struct dlci_local *dlp; - struct frad_local *flp; - int err; - - dlp = netdev_priv(dev); - - if (!*(short *)(dev->dev_addr)) - return -EINVAL; - - if (!netif_running(dlp->slave)) - return -ENOTCONN; - - flp = netdev_priv(dlp->slave); - err = (*flp->activate)(dlp->slave, dev); - if (err) - return err; - - netif_start_queue(dev); - - return 0; -} - -static int dlci_close(struct net_device *dev) -{ - struct dlci_local *dlp; - struct frad_local *flp; - - netif_stop_queue(dev); - - dlp = netdev_priv(dev); - - flp = netdev_priv(dlp->slave); - (*flp->deactivate)(dlp->slave, dev); - - return 0; -} - -static int dlci_add(struct dlci_add *dlci) -{ - struct net_device *master, *slave; - struct dlci_local *dlp; - struct frad_local *flp; - int err = -EINVAL; - - - /* validate slave device */ - slave = dev_get_by_name(&init_net, dlci->devname); - if (!slave) - return -ENODEV; - - if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL) - goto err1; - - /* create device name */ - master = alloc_netdev(sizeof(struct dlci_local), "dlci%d", - NET_NAME_UNKNOWN, 
dlci_setup); - if (!master) { - err = -ENOMEM; - goto err1; - } - - /* make sure same slave not already registered */ - rtnl_lock(); - list_for_each_entry(dlp, &dlci_devs, list) { - if (dlp->slave == slave) { - err = -EBUSY; - goto err2; - } - } - - *(short *)(master->dev_addr) = dlci->dlci; - - dlp = netdev_priv(master); - dlp->slave = slave; - dlp->master = master; - - flp = netdev_priv(slave); - err = (*flp->assoc)(slave, master); - if (err < 0) - goto err2; - - err = register_netdevice(master); - if (err < 0) - goto err2; - - strcpy(dlci->devname, master->name); - - list_add(&dlp->list, &dlci_devs); - rtnl_unlock(); - - return 0; - - err2: - rtnl_unlock(); - free_netdev(master); - err1: - dev_put(slave); - return err; -} - -static int dlci_del(struct dlci_add *dlci) -{ - struct dlci_local *dlp; - struct frad_local *flp; - struct net_device *master, *slave; - int err; - bool found = false; - - rtnl_lock(); - - /* validate slave device */ - master = __dev_get_by_name(&init_net, dlci->devname); - if (!master) { - err = -ENODEV; - goto out; - } - - list_for_each_entry(dlp, &dlci_devs, list) { - if (dlp->master == master) { - found = true; - break; - } - } - if (!found) { - err = -ENODEV; - goto out; - } - - if (netif_running(master)) { - err = -EBUSY; - goto out; - } - - dlp = netdev_priv(master); - slave = dlp->slave; - flp = netdev_priv(slave); - - err = (*flp->deassoc)(slave, master); - if (!err) { - list_del(&dlp->list); - - unregister_netdevice(master); - - dev_put(slave); - } -out: - rtnl_unlock(); - return err; -} - -static int dlci_ioctl(unsigned int cmd, void __user *arg) -{ - struct dlci_add add; - int err; - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (copy_from_user(&add, arg, sizeof(struct dlci_add))) - return -EFAULT; - - switch (cmd) - { - case SIOCADDDLCI: - err = dlci_add(&add); - - if (!err) - if (copy_to_user(arg, &add, sizeof(struct dlci_add))) - return -EFAULT; - break; - - case SIOCDELDLCI: - err = dlci_del(&add); - break; - - default: - err = -EINVAL; - } - - return err; -} - -static const struct header_ops dlci_header_ops = { - .create = dlci_header, -}; - -static const struct net_device_ops dlci_netdev_ops = { - .ndo_open = dlci_open, - .ndo_stop = dlci_close, - .ndo_do_ioctl = dlci_dev_ioctl, - .ndo_start_xmit = dlci_transmit, - .ndo_change_mtu = dlci_change_mtu, -}; - -static void dlci_setup(struct net_device *dev) -{ - struct dlci_local *dlp = netdev_priv(dev); - - dev->flags = 0; - dev->header_ops = &dlci_header_ops; - dev->netdev_ops = &dlci_netdev_ops; - dev->needs_free_netdev = true; - - dlp->receive = dlci_receive; - - dev->type = ARPHRD_DLCI; - dev->hard_header_len = sizeof(struct frhdr); - dev->addr_len = sizeof(short); - -} - -/* if slave is unregistering, then cleanup master */ -static int dlci_dev_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - if (dev_net(dev) != &init_net) - return NOTIFY_DONE; - - if (event == NETDEV_UNREGISTER) { - struct dlci_local *dlp; - - list_for_each_entry(dlp, &dlci_devs, list) { - if (dlp->slave == dev) { - list_del(&dlp->list); - unregister_netdevice(dlp->master); - dev_put(dlp->slave); - break; - } - } - } - return NOTIFY_DONE; -} - -static struct notifier_block dlci_notifier = { - .notifier_call = dlci_dev_event, -}; - -static int __init init_dlci(void) -{ - dlci_ioctl_set(dlci_ioctl); - register_netdevice_notifier(&dlci_notifier); - - printk("%s.\n", version); - - return 0; -} - -static void __exit dlci_exit(void) -{ - 
struct dlci_local *dlp, *nxt; - - dlci_ioctl_set(NULL); - unregister_netdevice_notifier(&dlci_notifier); - - rtnl_lock(); - list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) { - unregister_netdevice(dlp->master); - dev_put(dlp->slave); - } - rtnl_unlock(); -} - -module_init(init_dlci); -module_exit(dlci_exit); - -MODULE_AUTHOR("Mike McLagan"); -MODULE_DESCRIPTION("Frame Relay DLCI layer"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 409e5a7ad8e2..0720f5f92caa 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -871,6 +871,45 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) return 0; } +static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc) +{ + /* OUI 00-00-00 indicates an Ethertype follows */ + if (skb->data[0] == 0x00 && + skb->data[1] == 0x00 && + skb->data[2] == 0x00) { + if (!pvc->main) + return -1; + skb->dev = pvc->main; + skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */ + skb_pull(skb, 5); + skb_reset_mac_header(skb); + return 0; + + /* OUI 00-80-C2 stands for the 802.1 organization */ + } else if (skb->data[0] == 0x00 && + skb->data[1] == 0x80 && + skb->data[2] == 0xC2) { + /* PID 00-07 stands for Ethernet frames without FCS */ + if (skb->data[3] == 0x00 && + skb->data[4] == 0x07) { + if (!pvc->ether) + return -1; + skb_pull(skb, 5); + if (skb->len < ETH_HLEN) + return -1; + skb->protocol = eth_type_trans(skb, pvc->ether); + return 0; + + /* PID unsupported */ + } else { + return -1; + } + + /* OUI unsupported */ + } else { + return -1; + } +} static int fr_rx(struct sk_buff *skb) { @@ -880,9 +919,9 @@ static int fr_rx(struct sk_buff *skb) u8 *data = skb->data; u16 dlci; struct pvc_device *pvc; - struct net_device *dev = NULL; + struct net_device *dev; - if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) + if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI) goto rx_error; dlci = q922_to_dlci(skb->data); @@ -904,8 +943,7 @@ static int fr_rx(struct sk_buff *skb) netdev_info(frad, "No PVC for received frame's DLCI %d\n", dlci); #endif - dev_kfree_skb_any(skb); - return NET_RX_DROP; + goto rx_drop; } if (pvc->state.fecn != fh->fecn) { @@ -931,63 +969,51 @@ static int fr_rx(struct sk_buff *skb) } if (data[3] == NLPID_IP) { + if (!pvc->main) + goto rx_drop; skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ - dev = pvc->main; + skb->dev = pvc->main; skb->protocol = htons(ETH_P_IP); + skb_reset_mac_header(skb); } else if (data[3] == NLPID_IPV6) { + if (!pvc->main) + goto rx_drop; skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ - dev = pvc->main; + skb->dev = pvc->main; skb->protocol = htons(ETH_P_IPV6); + skb_reset_mac_header(skb); - } else if (skb->len > 10 && data[3] == FR_PAD && - data[4] == NLPID_SNAP && data[5] == FR_PAD) { - u16 oui = ntohs(*(__be16*)(data + 6)); - u16 pid = ntohs(*(__be16*)(data + 8)); - skb_pull(skb, 10); - - switch ((((u32)oui) << 16) | pid) { - case ETH_P_ARP: /* routed frame with SNAP */ - case ETH_P_IPX: - case ETH_P_IP: /* a long variant */ - case ETH_P_IPV6: - dev = pvc->main; - skb->protocol = htons(pid); - break; - - case 0x80C20007: /* bridged Ethernet frame */ - if ((dev = pvc->ether) != NULL) - skb->protocol = eth_type_trans(skb, dev); - break; - - default: - netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n", - oui, pid); - dev_kfree_skb_any(skb); - return NET_RX_DROP; + } else if (data[3] == FR_PAD) { + if (skb->len < 5) + goto rx_error; + if (data[4] == NLPID_SNAP) { /* A SNAP header 
follows */ + skb_pull(skb, 5); + if (skb->len < 5) /* Incomplete SNAP header */ + goto rx_error; + if (fr_snap_parse(skb, pvc)) + goto rx_drop; + } else { + goto rx_drop; } + } else { netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n", data[3], skb->len); - dev_kfree_skb_any(skb); - return NET_RX_DROP; + goto rx_drop; } - if (dev) { - dev->stats.rx_packets++; /* PVC traffic */ - dev->stats.rx_bytes += skb->len; - if (pvc->state.becn) - dev->stats.rx_compressed++; - skb->dev = dev; - netif_rx(skb); - return NET_RX_SUCCESS; - } else { - dev_kfree_skb_any(skb); - return NET_RX_DROP; - } + dev = skb->dev; + dev->stats.rx_packets++; /* PVC traffic */ + dev->stats.rx_bytes += skb->len; + if (pvc->state.becn) + dev->stats.rx_compressed++; + netif_rx(skb); + return NET_RX_SUCCESS; - rx_error: +rx_error: frad->stats.rx_errors++; /* Mark error */ +rx_drop: dev_kfree_skb_any(skb); return NET_RX_DROP; } diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index f52b9fed0593..bb164805804e 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -77,7 +77,6 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) } skb_push(skb, 1); - skb_reset_network_header(skb); ptr = skb->data; *ptr = X25_IFACE_DATA; @@ -118,7 +117,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->data[0]) { case X25_IFACE_DATA: /* Data to be transmitted */ skb_pull(skb, 1); - skb_reset_network_header(skb); if ((result = lapb_data_request(dev, skb)) != LAPB_OK) dev_kfree_skb(skb); return NETDEV_TX_OK; diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index b6be2454b8bd..605fe555e157 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -55,6 +55,9 @@ struct lapbethdev { static LIST_HEAD(lapbeth_devices); +static void lapbeth_connected(struct net_device *dev, int reason); +static void lapbeth_disconnected(struct net_device *dev, int reason); + /* ------------------------------------------------------------------------ */ /* @@ -167,11 +170,17 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, case X25_IFACE_DATA: break; case X25_IFACE_CONNECT: - if ((err = lapb_connect_request(dev)) != LAPB_OK) + err = lapb_connect_request(dev); + if (err == LAPB_CONNECTED) + lapbeth_connected(dev, LAPB_OK); + else if (err != LAPB_OK) pr_err("lapb_connect_request error: %d\n", err); goto drop; case X25_IFACE_DISCONNECT: - if ((err = lapb_disconnect_request(dev)) != LAPB_OK) + err = lapb_disconnect_request(dev); + if (err == LAPB_NOTCONNECTED) + lapbeth_disconnected(dev, LAPB_OK); + else if (err != LAPB_OK) pr_err("lapb_disconnect_request err: %d\n", err); fallthrough; default: diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 36600b0a0ab0..93c7e8502845 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -353,9 +353,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ switch(xc.command){ case lmc_xilinx_reset: /*fold02*/ { - u16 mii; spin_lock_irqsave(&sc->lmc_lock, flags); - mii = lmc_mii_readreg (sc, 0, 16); + lmc_mii_readreg (sc, 0, 16); /* * Make all of them 0 and make input @@ -424,10 +423,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; case lmc_xilinx_load_prom: /*fold02*/ { - u16 mii; int timeout = 500000; spin_lock_irqsave(&sc->lmc_lock, flags); - mii = lmc_mii_readreg (sc, 0, 16); + lmc_mii_readreg (sc, 0, 16); /* * Make all of them 0 and make input @@ 
-1185,7 +1183,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ int i; s32 stat; unsigned int badtx; - u32 firstcsr; int max_work = LMC_RXDESCS; int handled = 0; @@ -1203,8 +1200,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ goto lmc_int_fail_out; } - firstcsr = csr; - /* always go through this loop at least once */ while (csr & sc->lmc_intrmask) { handled = 1; diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index d0062224b216..ba5cc0c53833 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -92,7 +92,7 @@ typedef struct card_s { #define get_port(card, port) (&card->ports[port]) -#define sca_flush(card) (sca_in(IER0, card)); +#define sca_flush(card) (sca_in(IER0, card)) static inline void new_memcpy_toio(char __iomem *dest, char *src, int length) { diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c deleted file mode 100644 index bc2c1c7fb1a4..000000000000 --- a/drivers/net/wan/sdla.c +++ /dev/null @@ -1,1655 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * SDLA An implementation of a driver for the Sangoma S502/S508 series - * multi-protocol PC interface card. Initial offering is with - * the DLCI driver, providing Frame Relay support for linux. - * - * Global definitions for the Frame relay interface. - * - * Version: @(#)sdla.c 0.30 12 Sep 1996 - * - * Credits: Sangoma Technologies, for the use of 2 cards for an extended - * period of time. - * David Mandelstam <dm@sangoma.com> for getting me started on - * this project, and incentive to complete it. - * Gene Kozen <74604.152@compuserve.com> for providing me with - * important information about the cards. - * - * Author: Mike McLagan <mike.mclagan@linux.org> - * - * Changes: - * 0.15 Mike McLagan Improved error handling, packet dropping - * 0.20 Mike McLagan New transmit/receive flags for config - * If in FR mode, don't accept packets from - * non DLCI devices. - * 0.25 Mike McLagan Fixed problem with rejecting packets - * from non DLCI devices. 
- * 0.30 Mike McLagan Fixed kernel panic when used with modified - * ifconfig - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/fcntl.h> -#include <linux/interrupt.h> -#include <linux/ptrace.h> -#include <linux/ioport.h> -#include <linux/in.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/timer.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/if_frad.h> -#include <linux/sdla.h> -#include <linux/bitops.h> - -#include <asm/io.h> -#include <asm/dma.h> -#include <linux/uaccess.h> - -static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org"; - -static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390}; - -static unsigned int valid_mem[] = { - 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000, - 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000, - 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000, - 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000, - 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000}; - -static DEFINE_SPINLOCK(sdla_lock); - -/********************************************************* - * - * these are the core routines that access the card itself - * - *********************************************************/ - -#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW) - -static void __sdla_read(struct net_device *dev, int addr, void *buf, short len) -{ - char *temp; - const void *base; - int offset, bytes; - - temp = buf; - while(len) - { - offset = addr & SDLA_ADDR_MASK; - bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len; - base = (const void *) (dev->mem_start + offset); - - SDLA_WINDOW(dev, addr); - memcpy(temp, base, bytes); - - addr += bytes; - temp += bytes; - len -= bytes; - } -} - -static void sdla_read(struct net_device *dev, int addr, void *buf, short len) -{ - unsigned long flags; - spin_lock_irqsave(&sdla_lock, flags); - __sdla_read(dev, addr, buf, len); - spin_unlock_irqrestore(&sdla_lock, flags); -} - -static void __sdla_write(struct net_device *dev, int addr, - const void *buf, short len) -{ - const char *temp; - void *base; - int offset, bytes; - - temp = buf; - while(len) - { - offset = addr & SDLA_ADDR_MASK; - bytes = offset + len > SDLA_WINDOW_SIZE ? 
SDLA_WINDOW_SIZE - offset : len; - base = (void *) (dev->mem_start + offset); - - SDLA_WINDOW(dev, addr); - memcpy(base, temp, bytes); - - addr += bytes; - temp += bytes; - len -= bytes; - } -} - -static void sdla_write(struct net_device *dev, int addr, - const void *buf, short len) -{ - unsigned long flags; - - spin_lock_irqsave(&sdla_lock, flags); - __sdla_write(dev, addr, buf, len); - spin_unlock_irqrestore(&sdla_lock, flags); -} - - -static void sdla_clear(struct net_device *dev) -{ - unsigned long flags; - char *base; - int len, addr, bytes; - - len = 65536; - addr = 0; - bytes = SDLA_WINDOW_SIZE; - base = (void *) dev->mem_start; - - spin_lock_irqsave(&sdla_lock, flags); - while(len) - { - SDLA_WINDOW(dev, addr); - memset(base, 0, bytes); - - addr += bytes; - len -= bytes; - } - spin_unlock_irqrestore(&sdla_lock, flags); - -} - -static char sdla_byte(struct net_device *dev, int addr) -{ - unsigned long flags; - char byte, *temp; - - temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK)); - - spin_lock_irqsave(&sdla_lock, flags); - SDLA_WINDOW(dev, addr); - byte = *temp; - spin_unlock_irqrestore(&sdla_lock, flags); - - return byte; -} - -static void sdla_stop(struct net_device *dev) -{ - struct frad_local *flp; - - flp = netdev_priv(dev); - switch(flp->type) - { - case SDLA_S502A: - outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL); - flp->state = SDLA_HALT; - break; - case SDLA_S502E: - outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL); - outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL); - flp->state = SDLA_S502E_ENABLE; - break; - case SDLA_S507: - flp->state &= ~SDLA_CPUEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - case SDLA_S508: - flp->state &= ~SDLA_CPUEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - } -} - -static void sdla_start(struct net_device *dev) -{ - struct frad_local *flp; - - flp = netdev_priv(dev); - switch(flp->type) - { - case SDLA_S502A: - outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL); - outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL); - flp->state = SDLA_S502A_START; - break; - case SDLA_S502E: - outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL); - outb(0x00, dev->base_addr + SDLA_REG_CONTROL); - flp->state = 0; - break; - case SDLA_S507: - flp->state |= SDLA_CPUEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - case SDLA_S508: - flp->state |= SDLA_CPUEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - } -} - -/**************************************************** - * - * this is used for the S502A/E cards to determine - * the speed of the onboard CPU. Calibration is - * necessary for the Frame Relay code uploaded - * later. Incorrect results cause timing problems - * with link checks & status messages - * - ***************************************************/ - -static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2) -{ - unsigned long start, done, now; - char resp, *temp; - - start = now = jiffies; - done = jiffies + jiffs; - - temp = (void *)dev->mem_start; - temp += z80_addr & SDLA_ADDR_MASK; - - resp = ~resp1; - while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2))) - { - if (jiffies != now) - { - SDLA_WINDOW(dev, z80_addr); - now = jiffies; - resp = *temp; - } - } - return time_before(jiffies, done) ? 
jiffies - start : -1; -} - -/* constants for Z80 CPU speed */ -#define Z80_READY '1' /* Z80 is ready to begin */ -#define LOADER_READY '2' /* driver is ready to begin */ -#define Z80_SCC_OK '3' /* SCC is on board */ -#define Z80_SCC_BAD '4' /* SCC was not found */ - -static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr) -{ - int jiffs; - char data; - - sdla_start(dev); - if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0) - return -EIO; - - data = LOADER_READY; - sdla_write(dev, 0, &data, 1); - - if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0) - return -EIO; - - sdla_stop(dev); - sdla_read(dev, 0, &data, 1); - - if (data == Z80_SCC_BAD) - { - printk("%s: SCC bad\n", dev->name); - return -EIO; - } - - if (data != Z80_SCC_OK) - return -EINVAL; - - if (jiffs < 165) - ifr->ifr_mtu = SDLA_CPU_16M; - else if (jiffs < 220) - ifr->ifr_mtu = SDLA_CPU_10M; - else if (jiffs < 258) - ifr->ifr_mtu = SDLA_CPU_8M; - else if (jiffs < 357) - ifr->ifr_mtu = SDLA_CPU_7M; - else if (jiffs < 467) - ifr->ifr_mtu = SDLA_CPU_5M; - else - ifr->ifr_mtu = SDLA_CPU_3M; - - return 0; -} - -/************************************************ - * - * Direct interaction with the Frame Relay code - * starts here. - * - ************************************************/ - -struct _dlci_stat -{ - short dlci; - char flags; -} __packed; - -struct _frad_stat -{ - char flags; - struct _dlci_stat dlcis[SDLA_MAX_DLCI]; -}; - -static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data) -{ - struct _dlci_stat *pstatus; - short *pdlci; - int i; - char *state, line[30]; - - switch (ret) - { - case SDLA_RET_MODEM: - state = data; - if (*state & SDLA_MODEM_DCD_LOW) - netdev_info(dev, "Modem DCD unexpectedly low!\n"); - if (*state & SDLA_MODEM_CTS_LOW) - netdev_info(dev, "Modem CTS unexpectedly low!\n"); - /* I should probably do something about this! 
*/ - break; - - case SDLA_RET_CHANNEL_OFF: - netdev_info(dev, "Channel became inoperative!\n"); - /* same here */ - break; - - case SDLA_RET_CHANNEL_ON: - netdev_info(dev, "Channel became operative!\n"); - /* same here */ - break; - - case SDLA_RET_DLCI_STATUS: - netdev_info(dev, "Status change reported by Access Node\n"); - len /= sizeof(struct _dlci_stat); - for(pstatus = data, i=0;i < len;i++,pstatus++) - { - if (pstatus->flags & SDLA_DLCI_NEW) - state = "new"; - else if (pstatus->flags & SDLA_DLCI_DELETED) - state = "deleted"; - else if (pstatus->flags & SDLA_DLCI_ACTIVE) - state = "active"; - else - { - sprintf(line, "unknown status: %02X", pstatus->flags); - state = line; - } - netdev_info(dev, "DLCI %i: %s\n", - pstatus->dlci, state); - /* same here */ - } - break; - - case SDLA_RET_DLCI_UNKNOWN: - netdev_info(dev, "Received unknown DLCIs:"); - len /= sizeof(short); - for(pdlci = data,i=0;i < len;i++,pdlci++) - pr_cont(" %i", *pdlci); - pr_cont("\n"); - break; - - case SDLA_RET_TIMEOUT: - netdev_err(dev, "Command timed out!\n"); - break; - - case SDLA_RET_BUF_OVERSIZE: - netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n", - len); - break; - - case SDLA_RET_BUF_TOO_BIG: - netdev_info(dev, "Buffer size over specified max of %i\n", - len); - break; - - case SDLA_RET_CHANNEL_INACTIVE: - case SDLA_RET_DLCI_INACTIVE: - case SDLA_RET_CIR_OVERFLOW: - case SDLA_RET_NO_BUFS: - if (cmd == SDLA_INFORMATION_WRITE) - break; - fallthrough; - - default: - netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n", - cmd, ret); - /* Further processing could be done here */ - break; - } -} - -static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags, - void *inbuf, short inlen, void *outbuf, short *outlen) -{ - static struct _frad_stat status; - struct frad_local *flp; - struct sdla_cmd *cmd_buf; - unsigned long pflags; - unsigned long jiffs; - int ret, waiting, len; - long window; - - flp = netdev_priv(dev); - window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF; - cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK)); - ret = 0; - len = 0; - jiffs = jiffies + HZ; /* 1 second is plenty */ - - spin_lock_irqsave(&sdla_lock, pflags); - SDLA_WINDOW(dev, window); - cmd_buf->cmd = cmd; - cmd_buf->dlci = dlci; - cmd_buf->flags = flags; - - if (inbuf) - memcpy(cmd_buf->data, inbuf, inlen); - - cmd_buf->length = inlen; - - cmd_buf->opp_flag = 1; - spin_unlock_irqrestore(&sdla_lock, pflags); - - waiting = 1; - len = 0; - while (waiting && time_before_eq(jiffies, jiffs)) - { - if (waiting++ % 3) - { - spin_lock_irqsave(&sdla_lock, pflags); - SDLA_WINDOW(dev, window); - waiting = ((volatile int)(cmd_buf->opp_flag)); - spin_unlock_irqrestore(&sdla_lock, pflags); - } - } - - if (!waiting) - { - - spin_lock_irqsave(&sdla_lock, pflags); - SDLA_WINDOW(dev, window); - ret = cmd_buf->retval; - len = cmd_buf->length; - if (outbuf && outlen) - { - *outlen = *outlen >= len ? len : *outlen; - - if (*outlen) - memcpy(outbuf, cmd_buf->data, *outlen); - } - - /* This is a local copy that's used for error handling */ - if (ret) - memcpy(&status, cmd_buf->data, len > sizeof(status) ? 
sizeof(status) : len); - - spin_unlock_irqrestore(&sdla_lock, pflags); - } - else - ret = SDLA_RET_TIMEOUT; - - if (ret != SDLA_RET_OK) - sdla_errors(dev, cmd, dlci, ret, len, &status); - - return ret; -} - -/*********************************************** - * - * these functions are called by the DLCI driver - * - ***********************************************/ - -static int sdla_reconfig(struct net_device *dev); - -static int sdla_activate(struct net_device *slave, struct net_device *master) -{ - struct frad_local *flp; - int i; - - flp = netdev_priv(slave); - - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->master[i] == master) - break; - - if (i == CONFIG_DLCI_MAX) - return -ENODEV; - - flp->dlci[i] = abs(flp->dlci[i]); - - if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) - sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); - - return 0; -} - -static int sdla_deactivate(struct net_device *slave, struct net_device *master) -{ - struct frad_local *flp; - int i; - - flp = netdev_priv(slave); - - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->master[i] == master) - break; - - if (i == CONFIG_DLCI_MAX) - return -ENODEV; - - flp->dlci[i] = -abs(flp->dlci[i]); - - if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE)) - sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL); - - return 0; -} - -static int sdla_assoc(struct net_device *slave, struct net_device *master) -{ - struct frad_local *flp; - int i; - - if (master->type != ARPHRD_DLCI) - return -EINVAL; - - flp = netdev_priv(slave); - - for(i=0;i<CONFIG_DLCI_MAX;i++) - { - if (!flp->master[i]) - break; - if (abs(flp->dlci[i]) == *(short *)(master->dev_addr)) - return -EADDRINUSE; - } - - if (i == CONFIG_DLCI_MAX) - return -EMLINK; /* #### Alan: Comments on this ?? */ - - - flp->master[i] = master; - flp->dlci[i] = -*(short *)(master->dev_addr); - master->mtu = slave->mtu; - - if (netif_running(slave)) { - if (flp->config.station == FRAD_STATION_CPE) - sdla_reconfig(slave); - else - sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); - } - - return 0; -} - -static int sdla_deassoc(struct net_device *slave, struct net_device *master) -{ - struct frad_local *flp; - int i; - - flp = netdev_priv(slave); - - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->master[i] == master) - break; - - if (i == CONFIG_DLCI_MAX) - return -ENODEV; - - flp->master[i] = NULL; - flp->dlci[i] = 0; - - - if (netif_running(slave)) { - if (flp->config.station == FRAD_STATION_CPE) - sdla_reconfig(slave); - else - sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL); - } - - return 0; -} - -static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) -{ - struct frad_local *flp; - struct dlci_local *dlp; - int i; - short len, ret; - - flp = netdev_priv(slave); - - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->master[i] == master) - break; - - if (i == CONFIG_DLCI_MAX) - return -ENODEV; - - dlp = netdev_priv(master); - - ret = SDLA_RET_OK; - len = sizeof(struct dlci_conf); - if (netif_running(slave)) { - if (get) - ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, - NULL, 0, &dlp->config, &len); - else - ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, - &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL); - } - - return ret == SDLA_RET_OK ? 
0 : -EIO; -} - -/************************** - * - * now for the Linux driver - * - **************************/ - -/* NOTE: the DLCI driver deals with freeing the SKB!! */ -static netdev_tx_t sdla_transmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct frad_local *flp; - int ret, addr, accept, i; - short size; - unsigned long flags; - struct buf_entry *pbuf; - - flp = netdev_priv(dev); - ret = 0; - accept = 1; - - netif_stop_queue(dev); - - /* - * stupid GateD insists on setting up the multicast router thru us - * and we're ill equipped to handle a non Frame Relay packet at this - * time! - */ - - accept = 1; - switch (dev->type) - { - case ARPHRD_FRAD: - if (skb->dev->type != ARPHRD_DLCI) - { - netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n", - skb->dev->type); - accept = 0; - } - break; - default: - netdev_warn(dev, "unknown firmware type 0x%04X\n", - dev->type); - accept = 0; - break; - } - if (accept) - { - /* this is frame specific, but till there's a PPP module, it's the default */ - switch (flp->type) - { - case SDLA_S502A: - case SDLA_S502E: - ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL); - break; - case SDLA_S508: - size = sizeof(addr); - ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size); - if (ret == SDLA_RET_OK) - { - - spin_lock_irqsave(&sdla_lock, flags); - SDLA_WINDOW(dev, addr); - pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK)); - __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len); - SDLA_WINDOW(dev, addr); - pbuf->opp_flag = 1; - spin_unlock_irqrestore(&sdla_lock, flags); - } - break; - } - - switch (ret) - { - case SDLA_RET_OK: - dev->stats.tx_packets++; - break; - - case SDLA_RET_CIR_OVERFLOW: - case SDLA_RET_BUF_OVERSIZE: - case SDLA_RET_NO_BUFS: - dev->stats.tx_dropped++; - break; - - default: - dev->stats.tx_errors++; - break; - } - } - netif_wake_queue(dev); - for(i=0;i<CONFIG_DLCI_MAX;i++) - { - if(flp->master[i]!=NULL) - netif_wake_queue(flp->master[i]); - } - - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -static void sdla_receive(struct net_device *dev) -{ - struct net_device *master; - struct frad_local *flp; - struct dlci_local *dlp; - struct sk_buff *skb; - - struct sdla_cmd *cmd; - struct buf_info *pbufi; - struct buf_entry *pbuf; - - unsigned long flags; - int i=0, received, success, addr, buf_base, buf_top; - short dlci, len, len2, split; - - flp = netdev_priv(dev); - success = 1; - received = addr = buf_top = buf_base = 0; - len = dlci = 0; - skb = NULL; - master = NULL; - cmd = NULL; - pbufi = NULL; - pbuf = NULL; - - spin_lock_irqsave(&sdla_lock, flags); - - switch (flp->type) - { - case SDLA_S502A: - case SDLA_S502E: - cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK)); - SDLA_WINDOW(dev, SDLA_502_RCV_BUF); - success = cmd->opp_flag; - if (!success) - break; - - dlci = cmd->dlci; - len = cmd->length; - break; - - case SDLA_S508: - pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK)); - SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO); - pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK)); - success = pbuf->opp_flag; - if (!success) - break; - - buf_top = pbufi->buf_top; - buf_base = pbufi->buf_base; - dlci = pbuf->dlci; - len = pbuf->length; - addr = pbuf->buf_addr; - break; - } - - /* common code, find the DLCI and get the SKB */ - if (success) - { - for (i=0;i<CONFIG_DLCI_MAX;i++) - 
if (flp->dlci[i] == dlci) - break; - - if (i == CONFIG_DLCI_MAX) - { - netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n", - dlci); - dev->stats.rx_errors++; - success = 0; - } - } - - if (success) - { - master = flp->master[i]; - skb = dev_alloc_skb(len + sizeof(struct frhdr)); - if (skb == NULL) - { - netdev_notice(dev, "Memory squeeze, dropping packet\n"); - dev->stats.rx_dropped++; - success = 0; - } - else - skb_reserve(skb, sizeof(struct frhdr)); - } - - /* pick up the data */ - switch (flp->type) - { - case SDLA_S502A: - case SDLA_S502E: - if (success) - __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len); - - SDLA_WINDOW(dev, SDLA_502_RCV_BUF); - cmd->opp_flag = 0; - break; - - case SDLA_S508: - if (success) - { - /* is this buffer split off the end of the internal ring buffer */ - split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0; - len2 = len - split; - - __sdla_read(dev, addr, skb_put(skb, len2), len2); - if (split) - __sdla_read(dev, buf_base, skb_put(skb, split), split); - } - - /* increment the buffer we're looking at */ - SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO); - flp->buffer = (flp->buffer + 1) % pbufi->rse_num; - pbuf->opp_flag = 0; - break; - } - - if (success) - { - dev->stats.rx_packets++; - dlp = netdev_priv(master); - (*dlp->receive)(skb, master); - } - - spin_unlock_irqrestore(&sdla_lock, flags); -} - -static irqreturn_t sdla_isr(int dummy, void *dev_id) -{ - struct net_device *dev; - struct frad_local *flp; - char byte; - - dev = dev_id; - - flp = netdev_priv(dev); - - if (!flp->initialized) - { - netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq); - return IRQ_NONE; - } - - byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE); - switch (byte) - { - case SDLA_INTR_RX: - sdla_receive(dev); - break; - - /* the command will get an error return, which is processed above */ - case SDLA_INTR_MODEM: - case SDLA_INTR_STATUS: - sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL); - break; - - case SDLA_INTR_TX: - case SDLA_INTR_COMPLETE: - case SDLA_INTR_TIMER: - netdev_warn(dev, "invalid irq flag 0x%02X\n", byte); - break; - } - - /* the S502E requires a manual acknowledgement of the interrupt */ - if (flp->type == SDLA_S502E) - { - flp->state &= ~SDLA_S502E_INTACK; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - flp->state |= SDLA_S502E_INTACK; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - } - - /* this clears the byte, informing the Z80 we're done */ - byte = 0; - sdla_write(dev, flp->type == SDLA_S508 ? 
SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte)); - return IRQ_HANDLED; -} - -static void sdla_poll(struct timer_list *t) -{ - struct frad_local *flp = from_timer(flp, t, timer); - struct net_device *dev = flp->dev; - - if (sdla_byte(dev, SDLA_502_RCV_BUF)) - sdla_receive(dev); - - flp->timer.expires = 1; - add_timer(&flp->timer); -} - -static int sdla_close(struct net_device *dev) -{ - struct frad_local *flp; - struct intr_info intr; - int len, i; - short dlcis[CONFIG_DLCI_MAX]; - - flp = netdev_priv(dev); - - len = 0; - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i]) - dlcis[len++] = abs(flp->dlci[i]); - len *= 2; - - if (flp->config.station == FRAD_STATION_NODE) - { - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i] > 0) - sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL); - sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL); - } - - memset(&intr, 0, sizeof(intr)); - /* let's start up the reception */ - switch(flp->type) - { - case SDLA_S502A: - del_timer(&flp->timer); - break; - - case SDLA_S502E: - sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL); - flp->state &= ~SDLA_S502E_INTACK; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - - case SDLA_S507: - break; - - case SDLA_S508: - sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL); - flp->state &= ~SDLA_S508_INTEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - break; - } - - sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); - - netif_stop_queue(dev); - - return 0; -} - -struct conf_data { - struct frad_conf config; - short dlci[CONFIG_DLCI_MAX]; -}; - -static int sdla_open(struct net_device *dev) -{ - struct frad_local *flp; - struct dlci_local *dlp; - struct conf_data data; - struct intr_info intr; - int len, i; - char byte; - - flp = netdev_priv(dev); - - if (!flp->initialized) - return -EPERM; - - if (!flp->configured) - return -EPERM; - - /* time to send in the configuration */ - len = 0; - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i]) - data.dlci[len++] = abs(flp->dlci[i]); - len *= 2; - - memcpy(&data.config, &flp->config, sizeof(struct frad_conf)); - len += sizeof(struct frad_conf); - - sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); - sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); - - if (flp->type == SDLA_S508) - flp->buffer = 0; - - sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); - - /* let's start up the reception */ - memset(&intr, 0, sizeof(intr)); - switch(flp->type) - { - case SDLA_S502A: - flp->timer.expires = 1; - add_timer(&flp->timer); - break; - - case SDLA_S502E: - flp->state |= SDLA_S502E_ENABLE; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - flp->state |= SDLA_S502E_INTACK; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - byte = 0; - sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte)); - intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM; - sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL); - break; - - case SDLA_S507: - break; - - case SDLA_S508: - flp->state |= SDLA_S508_INTEN; - outb(flp->state, dev->base_addr + SDLA_REG_CONTROL); - byte = 0; - sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte)); - intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM; - intr.irq = dev->irq; - sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct 
intr_info), NULL, NULL); - break; - } - - if (flp->config.station == FRAD_STATION_CPE) - { - byte = SDLA_ICS_STATUS_ENQ; - sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL); - } - else - { - sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL); - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i] > 0) - sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL); - } - - /* configure any specific DLCI settings */ - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i]) - { - dlp = netdev_priv(flp->master[i]); - if (dlp->configured) - sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL); - } - - netif_start_queue(dev); - - return 0; -} - -static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get) -{ - struct frad_local *flp; - struct conf_data data; - int i; - short size; - - if (dev->type == 0xFFFF) - return -EUNATCH; - - flp = netdev_priv(dev); - - if (!get) - { - if (netif_running(dev)) - return -EBUSY; - - if(copy_from_user(&data.config, conf, sizeof(struct frad_conf))) - return -EFAULT; - - if (data.config.station & ~FRAD_STATION_NODE) - return -EINVAL; - - if (data.config.flags & ~FRAD_VALID_FLAGS) - return -EINVAL; - - if ((data.config.kbaud < 0) || - ((data.config.kbaud > 128) && (flp->type != SDLA_S508))) - return -EINVAL; - - if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232)) - return -EINVAL; - - if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU)) - return -EINVAL; - - if ((data.config.T391 < 5) || (data.config.T391 > 30)) - return -EINVAL; - - if ((data.config.T392 < 5) || (data.config.T392 > 30)) - return -EINVAL; - - if ((data.config.N391 < 1) || (data.config.N391 > 255)) - return -EINVAL; - - if ((data.config.N392 < 1) || (data.config.N392 > 10)) - return -EINVAL; - - if ((data.config.N393 < 1) || (data.config.N393 > 10)) - return -EINVAL; - - memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); - flp->config.flags |= SDLA_DIRECT_RECV; - - if (flp->type == SDLA_S508) - flp->config.flags |= SDLA_TX70_RX30; - - if (dev->mtu != flp->config.mtu) - { - /* this is required to change the MTU */ - dev->mtu = flp->config.mtu; - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->master[i]) - flp->master[i]->mtu = flp->config.mtu; - } - - flp->config.mtu += sizeof(struct frhdr); - - /* off to the races! */ - if (!flp->configured) - sdla_start(dev); - - flp->configured = 1; - } - else - { - /* no sense reading if the CPU isn't started */ - if (netif_running(dev)) - { - size = sizeof(data); - if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK) - return -EIO; - } - else - if (flp->configured) - memcpy(&data.config, &flp->config, sizeof(struct frad_conf)); - else - memset(&data.config, 0, sizeof(struct frad_conf)); - - memcpy(&flp->config, &data.config, sizeof(struct frad_conf)); - data.config.flags &= FRAD_VALID_FLAGS; - data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? 
sizeof(struct frhdr) : data.config.mtu; - return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0; - } - - return 0; -} - -static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read) -{ - struct sdla_mem mem; - char *temp; - - if(copy_from_user(&mem, info, sizeof(mem))) - return -EFAULT; - - if (read) - { - temp = kzalloc(mem.len, GFP_KERNEL); - if (!temp) - return -ENOMEM; - sdla_read(dev, mem.addr, temp, mem.len); - if(copy_to_user(mem.data, temp, mem.len)) - { - kfree(temp); - return -EFAULT; - } - kfree(temp); - } - else - { - temp = memdup_user(mem.data, mem.len); - if (IS_ERR(temp)) - return PTR_ERR(temp); - sdla_write(dev, mem.addr, temp, mem.len); - kfree(temp); - } - return 0; -} - -static int sdla_reconfig(struct net_device *dev) -{ - struct frad_local *flp; - struct conf_data data; - int i, len; - - flp = netdev_priv(dev); - - len = 0; - for(i=0;i<CONFIG_DLCI_MAX;i++) - if (flp->dlci[i]) - data.dlci[len++] = flp->dlci[i]; - len *= 2; - - memcpy(&data, &flp->config, sizeof(struct frad_conf)); - len += sizeof(struct frad_conf); - - sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); - sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL); - sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL); - - return 0; -} - -static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct frad_local *flp; - - if(!capable(CAP_NET_ADMIN)) - return -EPERM; - - flp = netdev_priv(dev); - - if (!flp->initialized) - return -EINVAL; - - switch (cmd) - { - case FRAD_GET_CONF: - case FRAD_SET_CONF: - return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF); - - case SDLA_IDENTIFY: - ifr->ifr_flags = flp->type; - break; - - case SDLA_CPUSPEED: - return sdla_cpuspeed(dev, ifr); - -/* ========================================================== -NOTE: This is rather a useless action right now, as the - current driver does not support protocols other than - FR. However, Sangoma has modules for a number of - other protocols in the works. -============================================================*/ - case SDLA_PROTOCOL: - if (flp->configured) - return -EALREADY; - - switch (ifr->ifr_flags) - { - case ARPHRD_FRAD: - dev->type = ifr->ifr_flags; - break; - default: - return -ENOPROTOOPT; - } - break; - - case SDLA_CLEARMEM: - sdla_clear(dev); - break; - - case SDLA_WRITEMEM: - case SDLA_READMEM: - if(!capable(CAP_SYS_RAWIO)) - return -EPERM; - return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM); - - case SDLA_START: - sdla_start(dev); - break; - - case SDLA_STOP: - sdla_stop(dev); - break; - - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int sdla_change_mtu(struct net_device *dev, int new_mtu) -{ - if (netif_running(dev)) - return -EBUSY; - - /* for now, you can't change the MTU! 
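- * A working implementation would have to do what sdla_config()
- * does above: validate against SDLA_MAX_MTU, propagate the new
- * value to every attached DLCI master device and push the DLCI
- * configuration back to the card. A minimal sketch (hypothetical,
- * never part of this driver):
- *
- *	dev->mtu = new_mtu;
- *	for (i = 0; i < CONFIG_DLCI_MAX; i++)
- *		if (flp->master[i])
- *			flp->master[i]->mtu = new_mtu;
- *	sdla_reconfig(dev);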
*/ - return -EOPNOTSUPP; -} - -static int sdla_set_config(struct net_device *dev, struct ifmap *map) -{ - struct frad_local *flp; - int i; - char byte; - unsigned base; - int err = -EINVAL; - - flp = netdev_priv(dev); - - if (flp->initialized) - return -EINVAL; - - for(i=0; i < ARRAY_SIZE(valid_port); i++) - if (valid_port[i] == map->base_addr) - break; - - if (i == ARRAY_SIZE(valid_port)) - return -EINVAL; - - if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ - pr_warn("io-port 0x%04lx in use\n", dev->base_addr); - return -EINVAL; - } - base = map->base_addr; - - /* test for card types, S502A, S502E, S507, S508 */ - /* these tests shut down the card completely, so clear the state */ - flp->type = SDLA_UNKNOWN; - flp->state = 0; - - for(i=1;i<SDLA_IO_EXTENTS;i++) - if (inb(base + i) != 0xFF) - break; - - if (i == SDLA_IO_EXTENTS) { - outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL); - if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) { - outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL); - if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) { - outb(SDLA_HALT, base + SDLA_REG_CONTROL); - flp->type = SDLA_S502E; - goto got_type; - } - } - } - - for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++) - if (inb(base + i) != byte) - break; - - if (i == SDLA_IO_EXTENTS) { - outb(SDLA_HALT, base + SDLA_REG_CONTROL); - if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) { - outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL); - if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) { - outb(SDLA_HALT, base + SDLA_REG_CONTROL); - flp->type = SDLA_S507; - goto got_type; - } - } - } - - outb(SDLA_HALT, base + SDLA_REG_CONTROL); - if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) { - outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL); - if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) { - outb(SDLA_HALT, base + SDLA_REG_CONTROL); - flp->type = SDLA_S508; - goto got_type; - } - } - - outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL); - if (inb(base + SDLA_S502_STS) == 0x40) { - outb(SDLA_S502A_START, base + SDLA_REG_CONTROL); - if (inb(base + SDLA_S502_STS) == 0x40) { - outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL); - if (inb(base + SDLA_S502_STS) == 0x44) { - outb(SDLA_S502A_START, base + SDLA_REG_CONTROL); - flp->type = SDLA_S502A; - goto got_type; - } - } - } - - netdev_notice(dev, "Unknown card type\n"); - err = -ENODEV; - goto fail; - -got_type: - switch(base) { - case 0x270: - case 0x280: - case 0x380: - case 0x390: - if (flp->type != SDLA_S508 && flp->type != SDLA_S507) - goto fail; - } - - switch (map->irq) { - case 2: - if (flp->type != SDLA_S502E) - goto fail; - break; - - case 10: - case 11: - case 12: - case 15: - case 4: - if (flp->type != SDLA_S508 && flp->type != SDLA_S507) - goto fail; - break; - case 3: - case 5: - case 7: - if (flp->type == SDLA_S502A) - goto fail; - break; - - default: - goto fail; - } - - err = -EAGAIN; - if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev)) - goto fail; - - if (flp->type == SDLA_S507) { - switch(dev->irq) { - case 3: - flp->state = SDLA_S507_IRQ3; - break; - case 4: - flp->state = SDLA_S507_IRQ4; - break; - case 5: - flp->state = SDLA_S507_IRQ5; - break; - case 7: - flp->state = SDLA_S507_IRQ7; - break; - case 10: - flp->state = SDLA_S507_IRQ10; - break; - case 11: - flp->state = SDLA_S507_IRQ11; - break; - case 12: - flp->state = SDLA_S507_IRQ12; - break; - case 15: - flp->state = SDLA_S507_IRQ15; - break; - } - } - - for(i=0; i < ARRAY_SIZE(valid_mem); i++) - if (valid_mem[i] == map->mem_start) - break; - - err = -EINVAL; - if (i == ARRAY_SIZE(valid_mem)) - goto fail2; - 
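-	/* The remaining address checks and the window value built below
-	 * are card specific: the PC window register packs an optional
-	 * SDLA_8K_WINDOW flag (not used on the S508), page bits taken
-	 * from bits 12-15 of the base address (shifted one further on
-	 * the S508) and a per-card segment code (the SDLA_*_SEG_*
-	 * values) selecting which 64K segment the shared memory sits in.
-	 */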
- if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E) - goto fail2; - - if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B) - goto fail2; - - if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D) - goto fail2; - - byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0; - byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0)); - switch(flp->type) { - case SDLA_S502A: - case SDLA_S502E: - switch (map->mem_start >> 16) { - case 0x0A: - byte |= SDLA_S502_SEG_A; - break; - case 0x0C: - byte |= SDLA_S502_SEG_C; - break; - case 0x0D: - byte |= SDLA_S502_SEG_D; - break; - case 0x0E: - byte |= SDLA_S502_SEG_E; - break; - } - break; - case SDLA_S507: - switch (map->mem_start >> 16) { - case 0x0A: - byte |= SDLA_S507_SEG_A; - break; - case 0x0B: - byte |= SDLA_S507_SEG_B; - break; - case 0x0C: - byte |= SDLA_S507_SEG_C; - break; - case 0x0E: - byte |= SDLA_S507_SEG_E; - break; - } - break; - case SDLA_S508: - switch (map->mem_start >> 16) { - case 0x0A: - byte |= SDLA_S508_SEG_A; - break; - case 0x0C: - byte |= SDLA_S508_SEG_C; - break; - case 0x0D: - byte |= SDLA_S508_SEG_D; - break; - case 0x0E: - byte |= SDLA_S508_SEG_E; - break; - } - break; - } - - /* set the memory bits, and enable access */ - outb(byte, base + SDLA_REG_PC_WINDOW); - - switch(flp->type) - { - case SDLA_S502E: - flp->state = SDLA_S502E_ENABLE; - break; - case SDLA_S507: - flp->state |= SDLA_MEMEN; - break; - case SDLA_S508: - flp->state = SDLA_MEMEN; - break; - } - outb(flp->state, base + SDLA_REG_CONTROL); - - dev->irq = map->irq; - dev->base_addr = base; - dev->mem_start = map->mem_start; - dev->mem_end = dev->mem_start + 0x2000; - flp->initialized = 1; - return 0; - -fail2: - free_irq(map->irq, dev); -fail: - release_region(base, SDLA_IO_EXTENTS); - return err; -} - -static const struct net_device_ops sdla_netdev_ops = { - .ndo_open = sdla_open, - .ndo_stop = sdla_close, - .ndo_do_ioctl = sdla_ioctl, - .ndo_set_config = sdla_set_config, - .ndo_start_xmit = sdla_transmit, - .ndo_change_mtu = sdla_change_mtu, -}; - -static void setup_sdla(struct net_device *dev) -{ - struct frad_local *flp = netdev_priv(dev); - - netdev_boot_setup_check(dev); - - dev->netdev_ops = &sdla_netdev_ops; - dev->flags = 0; - dev->type = 0xFFFF; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->mtu = SDLA_MAX_MTU; - - flp->activate = sdla_activate; - flp->deactivate = sdla_deactivate; - flp->assoc = sdla_assoc; - flp->deassoc = sdla_deassoc; - flp->dlci_conf = sdla_dlci_conf; - flp->dev = dev; - - timer_setup(&flp->timer, sdla_poll, 0); - flp->timer.expires = 1; -} - -static struct net_device *sdla; - -static int __init init_sdla(void) -{ - int err; - - printk("%s.\n", version); - - sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", - NET_NAME_UNKNOWN, setup_sdla); - if (!sdla) - return -ENOMEM; - - err = register_netdev(sdla); - if (err) - free_netdev(sdla); - - return err; -} - -static void __exit exit_sdla(void) -{ - struct frad_local *flp = netdev_priv(sdla); - - unregister_netdev(sdla); - if (flp->initialized) { - free_irq(sdla->irq, sdla); - release_region(sdla->base_addr, SDLA_IO_EXTENTS); - } - del_timer_sync(&flp->timer); - free_netdev(sdla); -} - -MODULE_LICENSE("GPL"); - -module_init(init_sdla); -module_exit(exit_sdla); diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c deleted file mode 100644 index 54b1a5aee82d..000000000000 --- a/drivers/net/wan/x25_asy.c +++ /dev/null @@ -1,836 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Things to sort out: - * - 
* o tbusy handling - * o allow users to set the parameters - * o sync/async switching ? - * - * Note: This does _not_ implement CCITT X.25 asynchronous framing - * recommendations. Its primarily for testing purposes. If you wanted - * to do CCITT then in theory all you need is to nick the HDLC async - * checksum routines from ppp.c - * Changes: - * - * 2000-10-29 Henner Eisen lapb_data_indication() return status. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/module.h> - -#include <linux/uaccess.h> -#include <linux/bitops.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/in.h> -#include <linux/tty.h> -#include <linux/errno.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/lapb.h> -#include <linux/init.h> -#include <linux/rtnetlink.h> -#include <linux/slab.h> -#include <net/x25device.h> -#include "x25_asy.h" - -static struct net_device **x25_asy_devs; -static int x25_asy_maxdev = SL_NRUNIT; - -module_param(x25_asy_maxdev, int, 0); -MODULE_LICENSE("GPL"); - -static int x25_asy_esc(unsigned char *p, unsigned char *d, int len); -static void x25_asy_unesc(struct x25_asy *sl, unsigned char c); -static void x25_asy_setup(struct net_device *dev); - -/* Find a free X.25 channel, and link in this `tty' line. */ -static struct x25_asy *x25_asy_alloc(void) -{ - struct net_device *dev = NULL; - struct x25_asy *sl; - int i; - - if (x25_asy_devs == NULL) - return NULL; /* Master array missing ! */ - - for (i = 0; i < x25_asy_maxdev; i++) { - dev = x25_asy_devs[i]; - - /* Not allocated ? */ - if (dev == NULL) - break; - - sl = netdev_priv(dev); - /* Not in use ? */ - if (!test_and_set_bit(SLF_INUSE, &sl->flags)) - return sl; - } - - - /* Sorry, too many, all slots in use */ - if (i >= x25_asy_maxdev) - return NULL; - - /* If no channels are available, allocate one */ - if (!dev) { - char name[IFNAMSIZ]; - sprintf(name, "x25asy%d", i); - - dev = alloc_netdev(sizeof(struct x25_asy), name, - NET_NAME_UNKNOWN, x25_asy_setup); - if (!dev) - return NULL; - - /* Initialize channel control data */ - sl = netdev_priv(dev); - dev->base_addr = i; - - /* register device so that it can be ifconfig'ed */ - if (register_netdev(dev) == 0) { - /* (Re-)Set the INUSE bit. Very Important! */ - set_bit(SLF_INUSE, &sl->flags); - x25_asy_devs[i] = dev; - return sl; - } else { - pr_warn("%s(): register_netdev() failure\n", __func__); - free_netdev(dev); - } - } - return NULL; -} - - -/* Free an X.25 channel. */ -static void x25_asy_free(struct x25_asy *sl) -{ - /* Free all X.25 frame buffers. 
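- * Both buffers were kmalloc()ed in x25_asy_open(); kfree() copes
- * with NULL, so this is safe even if the open never completed.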
*/ - kfree(sl->rbuff); - sl->rbuff = NULL; - kfree(sl->xbuff); - sl->xbuff = NULL; - - if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) - netdev_err(sl->dev, "x25_asy_free for already free unit\n"); -} - -static int x25_asy_change_mtu(struct net_device *dev, int newmtu) -{ - struct x25_asy *sl = netdev_priv(dev); - unsigned char *xbuff, *rbuff; - int len; - - len = 2 * newmtu; - xbuff = kmalloc(len + 4, GFP_ATOMIC); - rbuff = kmalloc(len + 4, GFP_ATOMIC); - - if (xbuff == NULL || rbuff == NULL) { - kfree(xbuff); - kfree(rbuff); - return -ENOMEM; - } - - spin_lock_bh(&sl->lock); - xbuff = xchg(&sl->xbuff, xbuff); - if (sl->xleft) { - if (sl->xleft <= len) { - memcpy(sl->xbuff, sl->xhead, sl->xleft); - } else { - sl->xleft = 0; - dev->stats.tx_dropped++; - } - } - sl->xhead = sl->xbuff; - - rbuff = xchg(&sl->rbuff, rbuff); - if (sl->rcount) { - if (sl->rcount <= len) { - memcpy(sl->rbuff, rbuff, sl->rcount); - } else { - sl->rcount = 0; - dev->stats.rx_over_errors++; - set_bit(SLF_ERROR, &sl->flags); - } - } - - dev->mtu = newmtu; - sl->buffsize = len; - - spin_unlock_bh(&sl->lock); - - kfree(xbuff); - kfree(rbuff); - return 0; -} - - -/* Set the "sending" flag. This must be atomic, hence the ASM. */ - -static inline void x25_asy_lock(struct x25_asy *sl) -{ - netif_stop_queue(sl->dev); -} - - -/* Clear the "sending" flag. This must be atomic, hence the ASM. */ - -static inline void x25_asy_unlock(struct x25_asy *sl) -{ - netif_wake_queue(sl->dev); -} - -/* Send an LAPB frame to the LAPB module to process. */ - -static void x25_asy_bump(struct x25_asy *sl) -{ - struct net_device *dev = sl->dev; - struct sk_buff *skb; - int count; - int err; - - count = sl->rcount; - dev->stats.rx_bytes += count; - - skb = dev_alloc_skb(count); - if (skb == NULL) { - netdev_warn(sl->dev, "memory squeeze, dropping packet\n"); - dev->stats.rx_dropped++; - return; - } - skb_put_data(skb, sl->rbuff, count); - err = lapb_data_received(sl->dev, skb); - if (err != LAPB_OK) { - kfree_skb(skb); - printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); - } else { - dev->stats.rx_packets++; - } -} - -/* Encapsulate one IP datagram and stuff into a TTY queue. */ -static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len) -{ - unsigned char *p; - int actual, count, mtu = sl->dev->mtu; - - if (len > mtu) { - /* Sigh, shouldn't occur BUT ... */ - len = mtu; - printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n", - sl->dev->name); - sl->dev->stats.tx_dropped++; - x25_asy_unlock(sl); - return; - } - - p = icp; - count = x25_asy_esc(p, sl->xbuff, len); - - /* Order of next two lines is *very* important. - * When we are sending a little amount of data, - * the transfer may be completed inside driver.write() - * routine, because it's running with interrupts enabled. - * In this case we *never* got WRITE_WAKEUP event, - * if we did not request it before write operation. - * 14 Oct 1994 Dmitry Gorodchanin. - */ - set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); - sl->xleft = count - actual; - sl->xhead = sl->xbuff + actual; -} - -/* - * Called by the driver when there's room for more data. If we have - * more packets to send, we send them here. - */ -static void x25_asy_write_wakeup(struct tty_struct *tty) -{ - int actual; - struct x25_asy *sl = tty->disc_data; - - /* First make sure we're connected. 
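- * The tty layer may still call us while the channel is being torn
- * down, hence the magic and netif_running() checks.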
*/ - if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) - return; - - if (sl->xleft <= 0) { - /* Now serial buffer is almost free & we can start - * transmission of another packet */ - sl->dev->stats.tx_packets++; - clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - x25_asy_unlock(sl); - return; - } - - actual = tty->ops->write(tty, sl->xhead, sl->xleft); - sl->xleft -= actual; - sl->xhead += actual; -} - -static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue) -{ - struct x25_asy *sl = netdev_priv(dev); - - spin_lock(&sl->lock); - if (netif_queue_stopped(dev)) { - /* May be we must check transmitter timeout here ? - * 14 Oct 1994 Dmitry Gorodchanin. - */ - netdev_warn(dev, "transmit timed out, %s?\n", - (tty_chars_in_buffer(sl->tty) || sl->xleft) ? - "bad line quality" : "driver error"); - sl->xleft = 0; - clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - x25_asy_unlock(sl); - } - spin_unlock(&sl->lock); -} - -/* Encapsulate an IP datagram and kick it into a TTY queue. */ - -static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, - struct net_device *dev) -{ - struct x25_asy *sl = netdev_priv(dev); - int err; - - if (!netif_running(sl->dev)) { - netdev_err(dev, "xmit call when iface is down\n"); - kfree_skb(skb); - return NETDEV_TX_OK; - } - - /* There should be a pseudo header of 1 byte added by upper layers. - * Check to make sure it is there before reading it. - */ - if (skb->len < 1) { - kfree_skb(skb); - return NETDEV_TX_OK; - } - - switch (skb->data[0]) { - case X25_IFACE_DATA: - break; - case X25_IFACE_CONNECT: /* Connection request .. do nothing */ - err = lapb_connect_request(dev); - if (err != LAPB_OK) - netdev_err(dev, "lapb_connect_request error: %d\n", - err); - kfree_skb(skb); - return NETDEV_TX_OK; - case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */ - err = lapb_disconnect_request(dev); - if (err != LAPB_OK) - netdev_err(dev, "lapb_disconnect_request error: %d\n", - err); - fallthrough; - default: - kfree_skb(skb); - return NETDEV_TX_OK; - } - skb_pull(skb, 1); /* Remove control byte */ - /* - * If we are busy already- too bad. We ought to be able - * to queue things at this point, to allow for a little - * frame buffer. Oh well... - * ----------------------------------------------------- - * I hate queues in X.25 driver. May be it's efficient, - * but for me latency is more important. ;) - * So, no queues ! - * 14 Oct 1994 Dmitry Gorodchanin. - */ - - err = lapb_data_request(dev, skb); - if (err != LAPB_OK) { - netdev_err(dev, "lapb_data_request error: %d\n", err); - kfree_skb(skb); - return NETDEV_TX_OK; - } - return NETDEV_TX_OK; -} - - -/* - * LAPB interface boilerplate - */ - -/* - * Called when I frame data arrive. We add a pseudo header for upper - * layers and pass it to upper layers. - */ - -static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) -{ - if (skb_cow(skb, 1)) { - kfree_skb(skb); - return NET_RX_DROP; - } - skb_push(skb, 1); - skb->data[0] = X25_IFACE_DATA; - - skb->protocol = x25_type_trans(skb, dev); - - return netif_rx(skb); -} - -/* - * Data has emerged from the LAPB protocol machine. We don't handle - * busy cases too well. Its tricky to see how to do this nicely - - * perhaps lapb should allow us to bounce this ? 
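- * As it stands, a frame handed to us while the queue is stopped or
- * the tty has gone away is simply dropped (the "tbusy drop" path
- * below).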
- */ - -static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) -{ - struct x25_asy *sl = netdev_priv(dev); - - spin_lock(&sl->lock); - if (netif_queue_stopped(sl->dev) || sl->tty == NULL) { - spin_unlock(&sl->lock); - netdev_err(dev, "tbusy drop\n"); - kfree_skb(skb); - return; - } - /* We were not busy, so we are now... :-) */ - if (skb != NULL) { - x25_asy_lock(sl); - dev->stats.tx_bytes += skb->len; - x25_asy_encaps(sl, skb->data, skb->len); - dev_kfree_skb(skb); - } - spin_unlock(&sl->lock); -} - -/* - * LAPB connection establish/down information. - */ - -static void x25_asy_connected(struct net_device *dev, int reason) -{ - struct x25_asy *sl = netdev_priv(dev); - struct sk_buff *skb; - unsigned char *ptr; - - skb = dev_alloc_skb(1); - if (skb == NULL) { - netdev_err(dev, "out of memory\n"); - return; - } - - ptr = skb_put(skb, 1); - *ptr = X25_IFACE_CONNECT; - - skb->protocol = x25_type_trans(skb, sl->dev); - netif_rx(skb); -} - -static void x25_asy_disconnected(struct net_device *dev, int reason) -{ - struct x25_asy *sl = netdev_priv(dev); - struct sk_buff *skb; - unsigned char *ptr; - - skb = dev_alloc_skb(1); - if (skb == NULL) { - netdev_err(dev, "out of memory\n"); - return; - } - - ptr = skb_put(skb, 1); - *ptr = X25_IFACE_DISCONNECT; - - skb->protocol = x25_type_trans(skb, sl->dev); - netif_rx(skb); -} - -static const struct lapb_register_struct x25_asy_callbacks = { - .connect_confirmation = x25_asy_connected, - .connect_indication = x25_asy_connected, - .disconnect_confirmation = x25_asy_disconnected, - .disconnect_indication = x25_asy_disconnected, - .data_indication = x25_asy_data_indication, - .data_transmit = x25_asy_data_transmit, -}; - - -/* Open the low-level part of the X.25 channel. Easy! */ -static int x25_asy_open(struct net_device *dev) -{ - struct x25_asy *sl = netdev_priv(dev); - unsigned long len; - - if (sl->tty == NULL) - return -ENODEV; - - /* - * Allocate the X.25 frame buffers: - * - * rbuff Receive buffer. - * xbuff Transmit buffer. - */ - - len = dev->mtu * 2; - - sl->rbuff = kmalloc(len + 4, GFP_KERNEL); - if (sl->rbuff == NULL) - goto norbuff; - sl->xbuff = kmalloc(len + 4, GFP_KERNEL); - if (sl->xbuff == NULL) - goto noxbuff; - - sl->buffsize = len; - sl->rcount = 0; - sl->xleft = 0; - sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */ - - return 0; - - /* Cleanup */ - kfree(sl->xbuff); - sl->xbuff = NULL; -noxbuff: - kfree(sl->rbuff); - sl->rbuff = NULL; -norbuff: - return -ENOMEM; -} - - -/* Close the low-level part of the X.25 channel. Easy! */ -static int x25_asy_close(struct net_device *dev) -{ - struct x25_asy *sl = netdev_priv(dev); - - spin_lock(&sl->lock); - if (sl->tty) - clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); - - sl->rcount = 0; - sl->xleft = 0; - spin_unlock(&sl->lock); - return 0; -} - -/* - * Handle the 'receiver data ready' interrupt. - * This function is called by the 'tty_io' module in the kernel when - * a block of X.25 data has been received, which can now be decapsulated - * and sent on to some IP layer for further processing. 
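- * The fp argument, when non-NULL, carries one flag byte per
- * character in cp; a non-zero flag marks a character received with
- * an error, and SLF_ERROR ensures rx_errors is bumped only once
- * per frame.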
- */ - -static void x25_asy_receive_buf(struct tty_struct *tty, - const unsigned char *cp, char *fp, int count) -{ - struct x25_asy *sl = tty->disc_data; - - if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) - return; - - - /* Read the characters out of the buffer */ - while (count--) { - if (fp && *fp++) { - if (!test_and_set_bit(SLF_ERROR, &sl->flags)) - sl->dev->stats.rx_errors++; - cp++; - continue; - } - x25_asy_unesc(sl, *cp++); - } -} - -/* - * Open the high-level part of the X.25 channel. - * This function is called by the TTY module when the - * X.25 line discipline is called for. Because we are - * sure the tty line exists, we only have to link it to - * a free X.25 channel... - */ - -static int x25_asy_open_tty(struct tty_struct *tty) -{ - struct x25_asy *sl; - int err; - - if (tty->ops->write == NULL) - return -EOPNOTSUPP; - - /* OK. Find a free X.25 channel to use. */ - sl = x25_asy_alloc(); - if (sl == NULL) - return -ENFILE; - - sl->tty = tty; - tty->disc_data = sl; - tty->receive_room = 65536; - tty_driver_flush_buffer(tty); - tty_ldisc_flush(tty); - - /* Restore default settings */ - sl->dev->type = ARPHRD_X25; - - /* Perform the low-level X.25 async init */ - err = x25_asy_open(sl->dev); - if (err) { - x25_asy_free(sl); - return err; - } - /* Done. We have linked the TTY line to a channel. */ - return 0; -} - - -/* - * Close down an X.25 channel. - * This means flushing out any pending queues, and then restoring the - * TTY line discipline to what it was before it got hooked to X.25 - * (which usually is TTY again). - */ -static void x25_asy_close_tty(struct tty_struct *tty) -{ - struct x25_asy *sl = tty->disc_data; - - /* First make sure we're connected. */ - if (!sl || sl->magic != X25_ASY_MAGIC) - return; - - rtnl_lock(); - if (sl->dev->flags & IFF_UP) - dev_close(sl->dev); - rtnl_unlock(); - - tty->disc_data = NULL; - sl->tty = NULL; - x25_asy_free(sl); -} - - /************************************************************************ - * STANDARD X.25 ENCAPSULATION * - ************************************************************************/ - -static int x25_asy_esc(unsigned char *s, unsigned char *d, int len) -{ - unsigned char *ptr = d; - unsigned char c; - - /* - * Send an initial END character to flush out any - * data that may have accumulated in the receiver - * due to line noise. - */ - - *ptr++ = X25_END; /* Send 10111110 bit seq */ - - /* - * For each byte in the packet, send the appropriate - * character sequence, according to the X.25 protocol. - */ - - while (len-- > 0) { - switch (c = *s++) { - case X25_END: - *ptr++ = X25_ESC; - *ptr++ = X25_ESCAPE(X25_END); - break; - case X25_ESC: - *ptr++ = X25_ESC; - *ptr++ = X25_ESCAPE(X25_ESC); - break; - default: - *ptr++ = c; - break; - } - } - *ptr++ = X25_END; - return ptr - d; -} - -static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) -{ - - switch (s) { - case X25_END: - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount >= 2) - x25_asy_bump(sl); - clear_bit(SLF_ESCAPE, &sl->flags); - sl->rcount = 0; - return; - case X25_ESC: - set_bit(SLF_ESCAPE, &sl->flags); - return; - case X25_ESCAPE(X25_ESC): - case X25_ESCAPE(X25_END): - if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) - s = X25_UNESCAPE(s); - break; - } - if (!test_bit(SLF_ERROR, &sl->flags)) { - if (sl->rcount < sl->buffsize) { - sl->rbuff[sl->rcount++] = s; - return; - } - sl->dev->stats.rx_over_errors++; - set_bit(SLF_ERROR, &sl->flags); - } -} - - -/* Perform I/O control on an active X.25 channel. 
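- * Only SIOCGIFNAME (copy out the interface name) and SIOCSIFHWADDR
- * (always -EINVAL) are handled here; everything else is passed
- * through to tty_mode_ioctl().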
*/ -static int x25_asy_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct x25_asy *sl = tty->disc_data; - - /* First make sure we're connected. */ - if (!sl || sl->magic != X25_ASY_MAGIC) - return -EINVAL; - - switch (cmd) { - case SIOCGIFNAME: - if (copy_to_user((void __user *)arg, sl->dev->name, - strlen(sl->dev->name) + 1)) - return -EFAULT; - return 0; - case SIOCSIFHWADDR: - return -EINVAL; - default: - return tty_mode_ioctl(tty, file, cmd, arg); - } -} - -static int x25_asy_open_dev(struct net_device *dev) -{ - int err; - struct x25_asy *sl = netdev_priv(dev); - if (sl->tty == NULL) - return -ENODEV; - - err = lapb_register(dev, &x25_asy_callbacks); - if (err != LAPB_OK) - return -ENOMEM; - - netif_start_queue(dev); - - return 0; -} - -static int x25_asy_close_dev(struct net_device *dev) -{ - int err; - - netif_stop_queue(dev); - - err = lapb_unregister(dev); - if (err != LAPB_OK) - pr_err("%s: lapb_unregister error: %d\n", - __func__, err); - - x25_asy_close(dev); - - return 0; -} - -static const struct net_device_ops x25_asy_netdev_ops = { - .ndo_open = x25_asy_open_dev, - .ndo_stop = x25_asy_close_dev, - .ndo_start_xmit = x25_asy_xmit, - .ndo_tx_timeout = x25_asy_timeout, - .ndo_change_mtu = x25_asy_change_mtu, -}; - -/* Initialise the X.25 driver. Called by the device init code */ -static void x25_asy_setup(struct net_device *dev) -{ - struct x25_asy *sl = netdev_priv(dev); - - sl->magic = X25_ASY_MAGIC; - sl->dev = dev; - spin_lock_init(&sl->lock); - set_bit(SLF_INUSE, &sl->flags); - - /* - * Finish setting up the DEVICE info. - */ - - dev->mtu = SL_MTU; - dev->min_mtu = 0; - dev->max_mtu = 65534; - dev->netdev_ops = &x25_asy_netdev_ops; - dev->watchdog_timeo = HZ*20; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->type = ARPHRD_X25; - dev->tx_queue_len = 10; - - /* When transmitting data: - * first this driver removes a pseudo header of 1 byte, - * then the lapb module prepends an LAPB header of at most 3 bytes. - */ - dev->needed_headroom = 3 - 1; - - /* New-style flags. */ - dev->flags = IFF_NOARP; -} - -static struct tty_ldisc_ops x25_ldisc = { - .owner = THIS_MODULE, - .magic = TTY_LDISC_MAGIC, - .name = "X.25", - .open = x25_asy_open_tty, - .close = x25_asy_close_tty, - .ioctl = x25_asy_ioctl, - .receive_buf = x25_asy_receive_buf, - .write_wakeup = x25_asy_write_wakeup, -}; - -static int __init init_x25_asy(void) -{ - if (x25_asy_maxdev < 4) - x25_asy_maxdev = 4; /* Sanity */ - - pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n", - x25_asy_maxdev); - - x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *), - GFP_KERNEL); - if (!x25_asy_devs) - return -ENOMEM; - - return tty_register_ldisc(N_X25, &x25_ldisc); -} - - -static void __exit exit_x25_asy(void) -{ - struct net_device *dev; - int i; - - for (i = 0; i < x25_asy_maxdev; i++) { - dev = x25_asy_devs[i]; - if (dev) { - struct x25_asy *sl = netdev_priv(dev); - - spin_lock_bh(&sl->lock); - if (sl->tty) - tty_hangup(sl->tty); - - spin_unlock_bh(&sl->lock); - /* - * VSV = if dev->start==0, then device - * unregistered while close proc. 
- */ - unregister_netdev(dev); - free_netdev(dev); - } - } - - kfree(x25_asy_devs); - tty_unregister_ldisc(N_X25); -} - -module_init(init_x25_asy); -module_exit(exit_x25_asy); diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h deleted file mode 100644 index 87798287c9ca..000000000000 --- a/drivers/net/wan/x25_asy.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_X25_ASY_H -#define _LINUX_X25_ASY_H - -/* X.25 asy configuration. */ -#define SL_NRUNIT 256 /* MAX number of X.25 channels; - This can be overridden with - insmod -ox25_asy_maxdev=nnn */ -#define SL_MTU 256 - -/* X25 async protocol characters. */ -#define X25_END 0x7E /* indicates end of frame */ -#define X25_ESC 0x7D /* indicates byte stuffing */ -#define X25_ESCAPE(x) ((x)^0x20) -#define X25_UNESCAPE(x) ((x)^0x20) - - -struct x25_asy { - int magic; - - /* Various fields. */ - spinlock_t lock; - struct tty_struct *tty; /* ptr to TTY structure */ - struct net_device *dev; /* easy for intr handling */ - - /* These are pointers to the malloc()ed frame buffers. */ - unsigned char *rbuff; /* receiver buffer */ - int rcount; /* received chars counter */ - unsigned char *xbuff; /* transmitter buffer */ - unsigned char *xhead; /* pointer to next byte to XMIT */ - int xleft; /* bytes left in XMIT queue */ - int buffsize; /* Max buffers sizes */ - - unsigned long flags; /* Flag values/ mode etc */ -#define SLF_INUSE 0 /* Channel in use */ -#define SLF_ESCAPE 1 /* ESC received */ -#define SLF_ERROR 2 /* Parity, etc. error */ -}; - - - -#define X25_ASY_MAGIC 0x5303 - -int x25_asy_init(struct net_device *dev); - -#endif /* _LINUX_X25_ASY.H */ diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig deleted file mode 100644 index 2249e3d77a76..000000000000 --- a/drivers/net/wimax/Kconfig +++ /dev/null @@ -1,18 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# WiMAX LAN device drivers configuration -# - - -comment "Enable WiMAX (Networking options) to see the WiMAX drivers" - depends on WIMAX = n - -if WIMAX - -menu "WiMAX Wireless Broadband devices" - -source "drivers/net/wimax/i2400m/Kconfig" - -endmenu - -endif diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile deleted file mode 100644 index b4575bacf994..000000000000 --- a/drivers/net/wimax/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_WIMAX_I2400M) += i2400m/ diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig deleted file mode 100644 index 843b905a26a3..000000000000 --- a/drivers/net/wimax/i2400m/Kconfig +++ /dev/null @@ -1,37 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config WIMAX_I2400M - tristate - depends on WIMAX - select FW_LOADER - -comment "Enable USB support to see WiMAX USB drivers" - depends on USB = n - -config WIMAX_I2400M_USB - tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)" - depends on WIMAX && USB - select WIMAX_I2400M - help - Select if you have a device based on the Intel WiMAX - Connection 2400 over USB (like any of the Intel Wireless - WiMAX/WiFi Link 5x50 series). - - If unsure, it is safe to select M (module). - -config WIMAX_I2400M_DEBUG_LEVEL - int "WiMAX i2400m debug level" - depends on WIMAX_I2400M - default 8 - help - - Select the maximum debug verbosity level to be compiled into - the WiMAX i2400m driver code. - - By default, this is disabled at runtime and can be - selectively enabled at runtime for different parts of the - code using the sysfs debug-levels file. 
- - If set at zero, this will compile out all the debug code. - - It is recommended that it is left at 8. diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile deleted file mode 100644 index b1db1eff0648..000000000000 --- a/drivers/net/wimax/i2400m/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 - -obj-$(CONFIG_WIMAX_I2400M) += i2400m.o -obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o - -i2400m-y := \ - control.o \ - driver.o \ - fw.o \ - op-rfkill.o \ - sysfs.o \ - netdev.o \ - tx.o \ - rx.o - -i2400m-$(CONFIG_DEBUG_FS) += debugfs.o - -i2400m-usb-y := \ - usb-fw.o \ - usb-notif.o \ - usb-tx.o \ - usb-rx.o \ - usb.o diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c deleted file mode 100644 index 8df98757d901..000000000000 --- a/drivers/net/wimax/i2400m/control.c +++ /dev/null @@ -1,1434 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * Miscellaneous control functions for managing the device - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Initial implementation - * - * This is a collection of functions used to control the device (plus - * a few helpers). - * - * There are utilities for handling TLV buffers, hooks on the device's - * reports to act on device changes of state [i2400m_report_hook()], - * on acks to commands [i2400m_msg_ack_hook()], a helper for sending - * commands to the device and blocking until a reply arrives - * [i2400m_msg_to_dev()], a few high level commands for manipulating - * the device state, powersving mode and configuration plus the - * routines to setup the device once communication is stablished with - * it [i2400m_dev_initialize()]. 
- * - * ROADMAP - * - * i2400m_dev_initialize() Called by i2400m_dev_start() - * i2400m_set_init_config() - * i2400m_cmd_get_state() - * i2400m_dev_shutdown() Called by i2400m_dev_stop() - * i2400m_reset() - * - * i2400m_{cmd,get,set}_*() - * i2400m_msg_to_dev() - * i2400m_msg_check_status() - * - * i2400m_report_hook() Called on reception of an event - * i2400m_report_state_hook() - * i2400m_tlv_buffer_walk() - * i2400m_tlv_match() - * i2400m_report_tlv_system_state() - * i2400m_report_tlv_rf_switches_status() - * i2400m_report_tlv_media_status() - * i2400m_cmd_enter_powersave() - * - * i2400m_msg_ack_hook() Called on reception of a reply to a - * command, get or set - */ - -#include <stdarg.h> -#include "i2400m.h" -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/wimax/i2400m.h> -#include <linux/export.h> -#include <linux/moduleparam.h> - - -#define D_SUBMODULE control -#include "debug-levels.h" - -static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */ -module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644); -MODULE_PARM_DESC(idle_mode_disabled, - "If true, the device will not enable idle mode negotiation " - "with the base station (when connected) to save power."); - -/* 0 (power saving enabled) by default */ -static int i2400m_power_save_disabled; -module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644); -MODULE_PARM_DESC(power_save_disabled, - "If true, the driver will not tell the device to enter " - "power saving mode when it reports it is ready for it. " - "False by default (so the device is told to do power " - "saving)."); - -static int i2400m_passive_mode; /* 0 (passive mode disabled) by default */ -module_param_named(passive_mode, i2400m_passive_mode, int, 0644); -MODULE_PARM_DESC(passive_mode, - "If true, the driver will not do any device setup " - "and leave it up to user space, who must be properly " - "setup."); - - -/* - * Return if a TLV is of a give type and size - * - * @tlv_hdr: pointer to the TLV - * @tlv_type: type of the TLV we are looking for - * @tlv_size: expected size of the TLV we are looking for (if -1, - * don't check the size). This includes the header - * Returns: 0 if the TLV matches - * < 0 if it doesn't match at all - * > 0 total TLV + payload size, if the type matches, but not - * the size - */ -static -ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv, - enum i2400m_tlv tlv_type, ssize_t tlv_size) -{ - if (le16_to_cpu(tlv->type) != tlv_type) /* Not our type? skip */ - return -1; - if (tlv_size != -1 - && le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) { - size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv); - printk(KERN_WARNING "W: tlv type 0x%x mismatched because of " - "size (got %zu vs %zd expected)\n", - tlv_type, size, tlv_size); - return size; - } - return 0; -} - - -/* - * Given a buffer of TLVs, iterate over them - * - * @i2400m: device instance - * @tlv_buf: pointer to the beginning of the TLV buffer - * @buf_size: buffer size in bytes - * @tlv_pos: seek position; this is assumed to be a pointer returned - * by i2400m_tlv_buffer_walk() [and thus, validated]. The - * TLV returned will be the one following this one. - * - * Usage: - * - * tlv_itr = NULL; - * while (tlv_itr = i2400m_tlv_buffer_walk(i2400m, buf, size, tlv_itr)) { - * ... - * // Do stuff with tlv_itr, DON'T MODIFY IT - * ... 
- * } - */ -static -const struct i2400m_tlv_hdr *i2400m_tlv_buffer_walk( - struct i2400m *i2400m, - const void *tlv_buf, size_t buf_size, - const struct i2400m_tlv_hdr *tlv_pos) -{ - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_tlv_hdr *tlv_top = tlv_buf + buf_size; - size_t offset, length, avail_size; - unsigned type; - - if (tlv_pos == NULL) /* Take the first one? */ - tlv_pos = tlv_buf; - else /* Nope, the next one */ - tlv_pos = (void *) tlv_pos - + le16_to_cpu(tlv_pos->length) + sizeof(*tlv_pos); - if (tlv_pos == tlv_top) { /* buffer done */ - tlv_pos = NULL; - goto error_beyond_end; - } - if (tlv_pos > tlv_top) { - tlv_pos = NULL; - WARN_ON(1); - goto error_beyond_end; - } - offset = (void *) tlv_pos - (void *) tlv_buf; - avail_size = buf_size - offset; - if (avail_size < sizeof(*tlv_pos)) { - dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], tlv @%zu: " - "short header\n", tlv_buf, buf_size, offset); - goto error_short_header; - } - type = le16_to_cpu(tlv_pos->type); - length = le16_to_cpu(tlv_pos->length); - if (avail_size < sizeof(*tlv_pos) + length) { - dev_err(dev, "HW BUG? tlv_buf %p [%zu bytes], " - "tlv type 0x%04x @%zu: " - "short data (%zu bytes vs %zu needed)\n", - tlv_buf, buf_size, type, offset, avail_size, - sizeof(*tlv_pos) + length); - goto error_short_header; - } -error_short_header: -error_beyond_end: - return tlv_pos; -} - - -/* - * Find a TLV in a buffer of sequential TLVs - * - * @i2400m: device descriptor - * @tlv_hdr: pointer to the first TLV in the sequence - * @size: size of the buffer in bytes; all TLVs are assumed to fit - * fully in the buffer (otherwise we'll complain). - * @tlv_type: type of the TLV we are looking for - * @tlv_size: expected size of the TLV we are looking for (if -1, - * don't check the size). This includes the header - * - * Returns: NULL if the TLV is not found, otherwise a pointer to - * it. If the sizes don't match, an error is printed and NULL - * returned. 
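- * Usage sketch (hypothetical caller; for the real matching done in
- * this file see i2400m_report_state_parse_tlv()):
- *
- *	tlv = i2400m_tlv_find(i2400m, tlv_hdr, size,
- *			      I2400M_TLV_SYSTEM_STATE, sizeof(*ss));
- *	if (tlv)
- *		ss = container_of(tlv, typeof(*ss), hdr);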
- */ -static -const struct i2400m_tlv_hdr *i2400m_tlv_find( - struct i2400m *i2400m, - const struct i2400m_tlv_hdr *tlv_hdr, size_t size, - enum i2400m_tlv tlv_type, ssize_t tlv_size) -{ - ssize_t match; - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_tlv_hdr *tlv = NULL; - while ((tlv = i2400m_tlv_buffer_walk(i2400m, tlv_hdr, size, tlv))) { - match = i2400m_tlv_match(tlv, tlv_type, tlv_size); - if (match == 0) /* found it :) */ - break; - if (match > 0) - dev_warn(dev, "TLV type 0x%04x found with size " - "mismatch (%zu vs %zd needed)\n", - tlv_type, match, tlv_size); - } - return tlv; -} - - -static const struct -{ - char *msg; - int errno; -} ms_to_errno[I2400M_MS_MAX] = { - [I2400M_MS_DONE_OK] = { "", 0 }, - [I2400M_MS_DONE_IN_PROGRESS] = { "", 0 }, - [I2400M_MS_INVALID_OP] = { "invalid opcode", -ENOSYS }, - [I2400M_MS_BAD_STATE] = { "invalid state", -EILSEQ }, - [I2400M_MS_ILLEGAL_VALUE] = { "illegal value", -EINVAL }, - [I2400M_MS_MISSING_PARAMS] = { "missing parameters", -ENOMSG }, - [I2400M_MS_VERSION_ERROR] = { "bad version", -EIO }, - [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO }, - [I2400M_MS_BUSY] = { "busy", -EBUSY }, - [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ }, - [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ }, - [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO }, - [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO }, - [I2400M_MS_NO_RF] = { "no RF", -EIO }, - [I2400M_MS_NOT_READY_FOR_POWERSAVE] = - { "not ready for powersave", -EACCES }, - [I2400M_MS_THERMAL_CRITICAL] = { "thermal critical", -EL3HLT }, -}; - - -/* - * i2400m_msg_check_status - translate a message's status code - * - * @i2400m: device descriptor - * @l3l4_hdr: message header - * @strbuf: buffer to place a formatted error message (unless NULL). - * @strbuf_size: max amount of available space; larger messages will - * be truncated. - * - * Returns: errno code corresponding to the status code in @l3l4_hdr - * and a message in @strbuf describing the error. 
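- * Usage sketch (hypothetical caller; compare i2400m_msg_ack_hook()
- * further down):
- *
- *	char strerr[32];
- *
- *	result = i2400m_msg_check_status(l3l4_hdr, strerr,
- *					 sizeof(strerr));
- *	if (result < 0)
- *		dev_err(dev, "command failed: %s\n", strerr);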
- */ -int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr, - char *strbuf, size_t strbuf_size) -{ - int result; - enum i2400m_ms status = le16_to_cpu(l3l4_hdr->status); - const char *str; - - if (status == 0) - return 0; - if (status >= ARRAY_SIZE(ms_to_errno)) { - str = "unknown status code"; - result = -EBADR; - } else { - str = ms_to_errno[status].msg; - result = ms_to_errno[status].errno; - } - if (strbuf) - snprintf(strbuf, strbuf_size, "%s (%d)", str, status); - return result; -} - - -/* - * Act on a TLV System State reported by the device - * - * @i2400m: device descriptor - * @ss: validated System State TLV - */ -static -void i2400m_report_tlv_system_state(struct i2400m *i2400m, - const struct i2400m_tlv_system_state *ss) -{ - struct device *dev = i2400m_dev(i2400m); - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - enum i2400m_system_state i2400m_state = le32_to_cpu(ss->state); - - d_fnstart(3, dev, "(i2400m %p ss %p [%u])\n", i2400m, ss, i2400m_state); - - if (i2400m->state != i2400m_state) { - i2400m->state = i2400m_state; - wake_up_all(&i2400m->state_wq); - } - switch (i2400m_state) { - case I2400M_SS_UNINITIALIZED: - case I2400M_SS_INIT: - case I2400M_SS_CONFIG: - case I2400M_SS_PRODUCTION: - wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED); - break; - - case I2400M_SS_RF_OFF: - case I2400M_SS_RF_SHUTDOWN: - wimax_state_change(wimax_dev, WIMAX_ST_RADIO_OFF); - break; - - case I2400M_SS_READY: - case I2400M_SS_STANDBY: - case I2400M_SS_SLEEPACTIVE: - wimax_state_change(wimax_dev, WIMAX_ST_READY); - break; - - case I2400M_SS_CONNECTING: - case I2400M_SS_WIMAX_CONNECTED: - wimax_state_change(wimax_dev, WIMAX_ST_READY); - break; - - case I2400M_SS_SCAN: - case I2400M_SS_OUT_OF_ZONE: - wimax_state_change(wimax_dev, WIMAX_ST_SCANNING); - break; - - case I2400M_SS_IDLE: - d_printf(1, dev, "entering BS-negotiated idle mode\n"); - fallthrough; - case I2400M_SS_DISCONNECTING: - case I2400M_SS_DATA_PATH_CONNECTED: - wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED); - break; - - default: - /* Huh? just in case, shut it down */ - dev_err(dev, "HW BUG? unknown state %u: shutting down\n", - i2400m_state); - i2400m_reset(i2400m, I2400M_RT_WARM); - break; - } - d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", - i2400m, ss, i2400m_state); -} - - -/* - * Parse and act on a TLV Media Status sent by the device - * - * @i2400m: device descriptor - * @ms: validated Media Status TLV - * - * This will set the carrier up on down based on the device's link - * report. This is done asides of what the WiMAX stack does based on - * the device's state as sometimes we need to do a link-renew (the BS - * wants us to renew a DHCP lease, for example). - * - * In fact, doc says that every time we get a link-up, we should do a - * DHCP negotiation... 
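- * Note that I2400M_MEDIA_STATUS_LINK_RENEW below is surfaced as a
- * plain carrier-on; the actual DHCP renewal is left to the user
- * space WiMAX network service that watches the link come up.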
- */ -static -void i2400m_report_tlv_media_status(struct i2400m *i2400m, - const struct i2400m_tlv_media_status *ms) -{ - struct device *dev = i2400m_dev(i2400m); - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct net_device *net_dev = wimax_dev->net_dev; - enum i2400m_media_status status = le32_to_cpu(ms->media_status); - - d_fnstart(3, dev, "(i2400m %p ms %p [%u])\n", i2400m, ms, status); - - switch (status) { - case I2400M_MEDIA_STATUS_LINK_UP: - netif_carrier_on(net_dev); - break; - case I2400M_MEDIA_STATUS_LINK_DOWN: - netif_carrier_off(net_dev); - break; - /* - * This is the network telling us we need to retrain the DHCP - * lease -- so far, we are trusting the WiMAX Network Service - * in user space to pick this up and poke the DHCP client. - */ - case I2400M_MEDIA_STATUS_LINK_RENEW: - netif_carrier_on(net_dev); - break; - default: - dev_err(dev, "HW BUG? unknown media status %u\n", - status); - } - d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n", - i2400m, ms, status); -} - - -/* - * Process a TLV from a 'state report' - * - * @i2400m: device descriptor - * @tlv: pointer to the TLV header; it has been already validated for - * consistent size. - * @tag: for error messages - * - * Act on the TLVs from a 'state report'. - */ -static -void i2400m_report_state_parse_tlv(struct i2400m *i2400m, - const struct i2400m_tlv_hdr *tlv, - const char *tag) -{ - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_tlv_media_status *ms; - const struct i2400m_tlv_system_state *ss; - const struct i2400m_tlv_rf_switches_status *rfss; - - if (0 == i2400m_tlv_match(tlv, I2400M_TLV_SYSTEM_STATE, sizeof(*ss))) { - ss = container_of(tlv, typeof(*ss), hdr); - d_printf(2, dev, "%s: system state TLV " - "found (0x%04x), state 0x%08x\n", - tag, I2400M_TLV_SYSTEM_STATE, - le32_to_cpu(ss->state)); - i2400m_report_tlv_system_state(i2400m, ss); - } - if (0 == i2400m_tlv_match(tlv, I2400M_TLV_RF_STATUS, sizeof(*rfss))) { - rfss = container_of(tlv, typeof(*rfss), hdr); - d_printf(2, dev, "%s: RF status TLV " - "found (0x%04x), sw 0x%02x hw 0x%02x\n", - tag, I2400M_TLV_RF_STATUS, - le32_to_cpu(rfss->sw_rf_switch), - le32_to_cpu(rfss->hw_rf_switch)); - i2400m_report_tlv_rf_switches_status(i2400m, rfss); - } - if (0 == i2400m_tlv_match(tlv, I2400M_TLV_MEDIA_STATUS, sizeof(*ms))) { - ms = container_of(tlv, typeof(*ms), hdr); - d_printf(2, dev, "%s: Media Status TLV: %u\n", - tag, le32_to_cpu(ms->media_status)); - i2400m_report_tlv_media_status(i2400m, ms); - } -} - - -/* - * Parse a 'state report' and extract information - * - * @i2400m: device descriptor - * @l3l4_hdr: pointer to message; it has been already validated for - * consistent size. - * @size: size of the message (header + payload). The header length - * declaration is assumed to be congruent with @size (as in - * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) - * - * Walk over the TLVs in a report state and act on them. 
- */ -static -void i2400m_report_state_hook(struct i2400m *i2400m, - const struct i2400m_l3l4_hdr *l3l4_hdr, - size_t size, const char *tag) -{ - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_tlv_hdr *tlv; - size_t tlv_size = le16_to_cpu(l3l4_hdr->length); - - d_fnstart(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s)\n", - i2400m, l3l4_hdr, size, tag); - tlv = NULL; - - while ((tlv = i2400m_tlv_buffer_walk(i2400m, &l3l4_hdr->pl, - tlv_size, tlv))) - i2400m_report_state_parse_tlv(i2400m, tlv, tag); - d_fnend(4, dev, "(i2400m %p, l3l4_hdr %p, size %zu, %s) = void\n", - i2400m, l3l4_hdr, size, tag); -} - - -/* - * i2400m_report_hook - (maybe) act on a report - * - * @i2400m: device descriptor - * @l3l4_hdr: pointer to message; it has been already validated for - * consistent size. - * @size: size of the message (header + payload). The header length - * declaration is assumed to be congruent with @size (as in - * sizeof(*l3l4_hdr) + l3l4_hdr->length == size) - * - * Extract information we might need (like carrien on/off) from a - * device report. - */ -void i2400m_report_hook(struct i2400m *i2400m, - const struct i2400m_l3l4_hdr *l3l4_hdr, size_t size) -{ - struct device *dev = i2400m_dev(i2400m); - unsigned msg_type; - - d_fnstart(3, dev, "(i2400m %p l3l4_hdr %p size %zu)\n", - i2400m, l3l4_hdr, size); - /* Chew on the message, we might need some information from - * here */ - msg_type = le16_to_cpu(l3l4_hdr->type); - switch (msg_type) { - case I2400M_MT_REPORT_STATE: /* carrier detection... */ - i2400m_report_state_hook(i2400m, - l3l4_hdr, size, "REPORT STATE"); - break; - /* If the device is ready for power save, then ask it to do - * it. */ - case I2400M_MT_REPORT_POWERSAVE_READY: /* zzzzz */ - if (l3l4_hdr->status == cpu_to_le16(I2400M_MS_DONE_OK)) { - if (i2400m_power_save_disabled) - d_printf(1, dev, "ready for powersave, " - "not requesting (disabled by module " - "parameter)\n"); - else { - d_printf(1, dev, "ready for powersave, " - "requesting\n"); - i2400m_cmd_enter_powersave(i2400m); - } - } - break; - } - d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n", - i2400m, l3l4_hdr, size); -} - - -/* - * i2400m_msg_ack_hook - process cmd/set/get ack for internal status - * - * @i2400m: device descriptor - * @l3l4_hdr: pointer to message; it has been already validated for - * consistent size. - * @size: size of the message - * - * Extract information we might need from acks to commands and act on - * it. This is akin to i2400m_report_hook(). Note most of this - * processing should be done in the function that calls the - * command. This is here for some cases where it can't happen... - */ -static void i2400m_msg_ack_hook(struct i2400m *i2400m, - const struct i2400m_l3l4_hdr *l3l4_hdr, - size_t size) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - unsigned int ack_type; - char strerr[32]; - - /* Chew on the message, we might need some information from - * here */ - ack_type = le16_to_cpu(l3l4_hdr->type); - switch (ack_type) { - case I2400M_MT_CMD_ENTER_POWERSAVE: - /* This is just left here for the sake of example, as - * the processing is done somewhere else. */ - if (0) { - result = i2400m_msg_check_status( - l3l4_hdr, strerr, sizeof(strerr)); - if (result >= 0) - d_printf(1, dev, "ready for power save: %zd\n", - size); - } - break; - } -} - - -/* - * i2400m_msg_size_check() - verify message size and header are congruent - * - * It is ok if the total message size is larger than the expected - * size, as there can be padding. 
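- * Concretely, a message is accepted when
- *
- *	msg_size >= sizeof(*l3l4_hdr) + le16_to_cpu(l3l4_hdr->length)
- *
- * and anything shorter fails with -EIO.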
- */
-int i2400m_msg_size_check(struct i2400m *i2400m,
-			  const struct i2400m_l3l4_hdr *l3l4_hdr,
-			  size_t msg_size)
-{
-	int result;
-	struct device *dev = i2400m_dev(i2400m);
-	size_t expected_size;
-	d_fnstart(4, dev, "(i2400m %p l3l4_hdr %p msg_size %zu)\n",
-		  i2400m, l3l4_hdr, msg_size);
-	if (msg_size < sizeof(*l3l4_hdr)) {
-		dev_err(dev, "bad size for message header "
-			"(expected at least %zu, got %zu)\n",
-			(size_t) sizeof(*l3l4_hdr), msg_size);
-		result = -EIO;
-		goto error_hdr_size;
-	}
-	expected_size = le16_to_cpu(l3l4_hdr->length) + sizeof(*l3l4_hdr);
-	if (msg_size < expected_size) {
-		dev_err(dev, "bad size for message code 0x%04x (expected %zu, "
-			"got %zu)\n", le16_to_cpu(l3l4_hdr->type),
-			expected_size, msg_size);
-		result = -EIO;
-	} else
-		result = 0;
-error_hdr_size:
-	d_fnend(4, dev,
-		"(i2400m %p l3l4_hdr %p msg_size %zu) = %d\n",
-		i2400m, l3l4_hdr, msg_size, result);
-	return result;
-}
-
-
-
-/*
- * Cancel a wait for a command ACK
- *
- * @i2400m: device descriptor
- * @code: [negative] errno code to cancel with (don't use
- *     -EINPROGRESS)
- *
- * If there is an ack already filled out, free it.
- */
-void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
-{
-	struct sk_buff *ack_skb;
-	unsigned long flags;
-
-	spin_lock_irqsave(&i2400m->rx_lock, flags);
-	ack_skb = i2400m->ack_skb;
-	if (ack_skb && !IS_ERR(ack_skb))
-		kfree_skb(ack_skb);
-	i2400m->ack_skb = ERR_PTR(code);
-	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-}
-
-
-/**
- * i2400m_msg_to_dev - Send a control message to the device and get a response
- *
- * @i2400m: device descriptor
- *
- * @buf: pointer to the buffer containing the message to be sent; it
- *           has to start with a &struct i2400m_l3l4_hdr and then
- *           followed by the payload. Once this function returns, the
- *           buffer can be reused.
- *
- * @buf_len: buffer size
- *
- * Returns:
- *
- * Pointer to skb containing the ack message. You need to check the
- * pointer with IS_ERR(), as it might be an error code. Error codes
- * could happen because:
- *
- *  - the message wasn't formatted correctly
- *  - couldn't send the message
- *  - failed waiting for a response
- *  - the ack message wasn't formatted correctly
- *
- * The returned skb has been allocated with wimax_msg_to_user_alloc(),
- * it contains the response in a netlink attribute and is ready to be
- * passed up to user space with wimax_msg_to_user_send(). To access
- * the payload and its length, use wimax_msg_{data,len}() on the skb.
- *
- * The skb has to be freed with kfree_skb() once done.
- *
- * Description:
- *
- * This function delivers a message/command to the device and waits
- * for an ack to be received. The format is described in
- * linux/wimax/i2400m.h. In summary, a command/get/set is followed by an
- * ack.
- *
- * This function will not check the ack status, that's left up to the
- * caller. Once done with the ack skb, it has to be kfree_skb()ed.
- *
- * The i2400m handles only one message at a time, thus we need the
- * mutex to exclude other players.
- *
- * We write the message and then wait for an answer to come back. The
- * RX path intercepts control messages and handles them in
- * i2400m_rx_ctl(). Reports (notifications) are (maybe) processed
- * locally and then forwarded (as needed) to user space on the WiMAX
- * stack message pipe. Acks are saved and passed back to us through an
- * skb in i2400m->ack_skb which is ready to be given to generic
- * netlink if need be.
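
A hedged usage sketch of the call sequence described above, borrowing the GET_STATE command from later in this file purely as an example; since the kerneldoc notes the buffer can be reused once the call returns, an on-stack command suffices for a sketch (the real helpers kzalloc() theirs):

static int example_get_state(struct i2400m *i2400m)
{
	struct i2400m_l3l4_hdr cmd = {
		.type    = cpu_to_le16(I2400M_MT_GET_STATE),
		.length  = 0,			/* no TLV payload */
		.version = cpu_to_le16(I2400M_L3L4_VERSION),
	};
	struct sk_buff *ack_skb;
	char strerr[32];
	int result;

	ack_skb = i2400m_msg_to_dev(i2400m, &cmd, sizeof(cmd));
	if (IS_ERR(ack_skb))		/* couldn't send, timed out, ... */
		return PTR_ERR(ack_skb);
	result = i2400m_msg_check_status(wimax_msg_data(ack_skb),
					 strerr, sizeof(strerr));
	kfree_skb(ack_skb);		/* caller owns the ack skb */
	return result;
}
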
- */ -struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m, - const void *buf, size_t buf_len) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_l3l4_hdr *msg_l3l4_hdr; - struct sk_buff *ack_skb; - const struct i2400m_l3l4_hdr *ack_l3l4_hdr; - size_t ack_len; - int ack_timeout; - unsigned msg_type; - unsigned long flags; - - d_fnstart(3, dev, "(i2400m %p buf %p len %zu)\n", - i2400m, buf, buf_len); - - rmb(); /* Make sure we see what i2400m_dev_reset_handle() */ - if (i2400m->boot_mode) - return ERR_PTR(-EL3RST); - - msg_l3l4_hdr = buf; - /* Check msg & payload consistency */ - result = i2400m_msg_size_check(i2400m, msg_l3l4_hdr, buf_len); - if (result < 0) - goto error_bad_msg; - msg_type = le16_to_cpu(msg_l3l4_hdr->type); - d_printf(1, dev, "CMD/GET/SET 0x%04x %zu bytes\n", - msg_type, buf_len); - d_dump(2, dev, buf, buf_len); - - /* Setup the completion, ack_skb ("we are waiting") and send - * the message to the device */ - mutex_lock(&i2400m->msg_mutex); - spin_lock_irqsave(&i2400m->rx_lock, flags); - i2400m->ack_skb = ERR_PTR(-EINPROGRESS); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - init_completion(&i2400m->msg_completion); - result = i2400m_tx(i2400m, buf, buf_len, I2400M_PT_CTRL); - if (result < 0) { - dev_err(dev, "can't send message 0x%04x: %d\n", - le16_to_cpu(msg_l3l4_hdr->type), result); - goto error_tx; - } - - /* Some commands take longer to execute because of crypto ops, - * so we give them some more leeway on timeout */ - switch (msg_type) { - case I2400M_MT_GET_TLS_OPERATION_RESULT: - case I2400M_MT_CMD_SEND_EAP_RESPONSE: - ack_timeout = 5 * HZ; - break; - default: - ack_timeout = HZ; - } - - if (unlikely(i2400m->trace_msg_from_user)) - wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL); - /* The RX path in rx.c will put any response for this message - * in i2400m->ack_skb and wake us up. If we cancel the wait, - * we need to change the value of i2400m->ack_skb to something - * not -EINPROGRESS so RX knows there is no one waiting. */ - result = wait_for_completion_interruptible_timeout( - &i2400m->msg_completion, ack_timeout); - if (result == 0) { - dev_err(dev, "timeout waiting for reply to message 0x%04x\n", - msg_type); - result = -ETIMEDOUT; - i2400m_msg_to_dev_cancel_wait(i2400m, result); - goto error_wait_for_completion; - } else if (result < 0) { - dev_err(dev, "error waiting for reply to message 0x%04x: %d\n", - msg_type, result); - i2400m_msg_to_dev_cancel_wait(i2400m, result); - goto error_wait_for_completion; - } - - /* Pull out the ack data from i2400m->ack_skb -- see if it is - * an error and act accordingly */ - spin_lock_irqsave(&i2400m->rx_lock, flags); - ack_skb = i2400m->ack_skb; - if (IS_ERR(ack_skb)) - result = PTR_ERR(ack_skb); - else - result = 0; - i2400m->ack_skb = NULL; - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - if (result < 0) - goto error_ack_status; - ack_l3l4_hdr = wimax_msg_data_len(ack_skb, &ack_len); - - /* Check the ack and deliver it if it is ok */ - if (unlikely(i2400m->trace_msg_from_user)) - wimax_msg(&i2400m->wimax_dev, "echo", - ack_l3l4_hdr, ack_len, GFP_KERNEL); - result = i2400m_msg_size_check(i2400m, ack_l3l4_hdr, ack_len); - if (result < 0) { - dev_err(dev, "HW BUG? reply to message 0x%04x: %d\n", - msg_type, result); - goto error_bad_ack_len; - } - if (msg_type != le16_to_cpu(ack_l3l4_hdr->type)) { - dev_err(dev, "HW BUG? 
bad reply 0x%04x to message 0x%04x\n", - le16_to_cpu(ack_l3l4_hdr->type), msg_type); - result = -EIO; - goto error_bad_ack_type; - } - i2400m_msg_ack_hook(i2400m, ack_l3l4_hdr, ack_len); - mutex_unlock(&i2400m->msg_mutex); - d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %p\n", - i2400m, buf, buf_len, ack_skb); - return ack_skb; - -error_bad_ack_type: -error_bad_ack_len: - kfree_skb(ack_skb); -error_ack_status: -error_wait_for_completion: -error_tx: - mutex_unlock(&i2400m->msg_mutex); -error_bad_msg: - d_fnend(3, dev, "(i2400m %p buf %p len %zu) = %d\n", - i2400m, buf, buf_len, result); - return ERR_PTR(result); -} - - -/* - * Definitions for the Enter Power Save command - * - * The Enter Power Save command requests the device to go into power - * saving mode. The device will ack or nak the command depending on it - * being ready for it. If it acks, we tell the USB subsystem to - * - * As well, the device might request to go into power saving mode by - * sending a report (REPORT_POWERSAVE_READY), in which case, we issue - * this command. The hookups in the RX coder allow - */ -enum { - I2400M_WAKEUP_ENABLED = 0x01, - I2400M_WAKEUP_DISABLED = 0x02, - I2400M_TLV_TYPE_WAKEUP_MODE = 144, -}; - -struct i2400m_cmd_enter_power_save { - struct i2400m_l3l4_hdr hdr; - struct i2400m_tlv_hdr tlv; - __le32 val; -} __packed; - - -/* - * Request entering power save - * - * This command is (mainly) executed when the device indicates that it - * is ready to go into powersave mode via a REPORT_POWERSAVE_READY. - */ -int i2400m_cmd_enter_powersave(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_cmd_enter_power_save *cmd; - char strerr[32]; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_ENTER_POWERSAVE); - cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); - cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); - cmd->tlv.type = cpu_to_le16(I2400M_TLV_TYPE_WAKEUP_MODE); - cmd->tlv.length = cpu_to_le16(sizeof(cmd->val)); - cmd->val = cpu_to_le32(I2400M_WAKEUP_ENABLED); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - result = PTR_ERR(ack_skb); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'Enter power save' command: %d\n", - result); - goto error_msg_to_dev; - } - result = i2400m_msg_check_status(wimax_msg_data(ack_skb), - strerr, sizeof(strerr)); - if (result == -EACCES) - d_printf(1, dev, "Cannot enter power save mode\n"); - else if (result < 0) - dev_err(dev, "'Enter power save' (0x%04x) command failed: " - "%d - %s\n", I2400M_MT_CMD_ENTER_POWERSAVE, - result, strerr); - else - d_printf(1, dev, "device ready to power save\n"); - kfree_skb(ack_skb); -error_msg_to_dev: - kfree(cmd); -error_alloc: - return result; -} -EXPORT_SYMBOL_GPL(i2400m_cmd_enter_powersave); - - -/* - * Definitions for getting device information - */ -enum { - I2400M_TLV_DETAILED_DEVICE_INFO = 140 -}; - -/** - * i2400m_get_device_info - Query the device for detailed device information - * - * @i2400m: device descriptor - * - * Returns: an skb whose skb->data points to a 'struct - * i2400m_tlv_detailed_device_info'. When done, kfree_skb() it. The - * skb is *guaranteed* to contain the whole TLV data structure. - * - * On error, IS_ERR(skb) is true and ERR_PTR(skb) is the error - * code. 
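
The two length fields in the command defined above cover different spans: hdr.length counts the TLV header plus its payload, while tlv.length counts the 4-byte value alone. A compile-time restatement of the layout, true by construction for the __packed struct (the check function itself is invented for illustration):

static void __maybe_unused power_save_cmd_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct i2400m_cmd_enter_power_save) !=
		     sizeof(struct i2400m_l3l4_hdr) +
		     sizeof(struct i2400m_tlv_hdr) + sizeof(__le32));
}
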
- */ -struct sk_buff *i2400m_get_device_info(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_l3l4_hdr *cmd; - const struct i2400m_l3l4_hdr *ack; - size_t ack_len; - const struct i2400m_tlv_hdr *tlv; - const struct i2400m_tlv_detailed_device_info *ddi; - char strerr[32]; - - ack_skb = ERR_PTR(-ENOMEM); - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->type = cpu_to_le16(I2400M_MT_GET_DEVICE_INFO); - cmd->length = 0; - cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'get device info' command: %ld\n", - PTR_ERR(ack_skb)); - goto error_msg_to_dev; - } - ack = wimax_msg_data_len(ack_skb, &ack_len); - result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); - if (result < 0) { - dev_err(dev, "'get device info' (0x%04x) command failed: " - "%d - %s\n", I2400M_MT_GET_DEVICE_INFO, result, - strerr); - goto error_cmd_failed; - } - tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), - I2400M_TLV_DETAILED_DEVICE_INFO, sizeof(*ddi)); - if (tlv == NULL) { - dev_err(dev, "GET DEVICE INFO: " - "detailed device info TLV not found (0x%04x)\n", - I2400M_TLV_DETAILED_DEVICE_INFO); - result = -EIO; - goto error_no_tlv; - } - skb_pull(ack_skb, (void *) tlv - (void *) ack_skb->data); -error_msg_to_dev: - kfree(cmd); -error_alloc: - return ack_skb; - -error_no_tlv: -error_cmd_failed: - kfree_skb(ack_skb); - kfree(cmd); - return ERR_PTR(result); -} - - -/* Firmware interface versions we support */ -enum { - I2400M_HDIv_MAJOR = 9, - I2400M_HDIv_MINOR = 1, - I2400M_HDIv_MINOR_2 = 2, -}; - - -/** - * i2400m_firmware_check - check firmware versions are compatible with - * the driver - * - * @i2400m: device descriptor - * - * Returns: 0 if ok, < 0 errno code an error and a message in the - * kernel log. - * - * Long function, but quite simple; first chunk launches the command - * and double checks the reply for the right TLV. Then we process the - * TLV (where the meat is). - * - * Once we process the TLV that gives us the firmware's interface - * version, we encode it and save it in i2400m->fw_version for future - * reference. 
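
The encoding mentioned above (major in the high 16 bits, minor in the low 16) is what makes plain integer comparisons such as the "fw_version >= 0x00090002" gate quoted later for i2400m_set_idle_timeout() work. A small sketch; fw_at_least() is invented for illustration:

static bool fw_at_least(struct i2400m *i2400m, unsigned major, unsigned minor)
{
	/* i2400m_firmware_check() stores major << 16 | minor, so
	 * HDI 9.2 encodes as 0x00090002 and ordering is preserved */
	return i2400m->fw_version >= (major << 16 | minor);
}
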
- */ -int i2400m_firmware_check(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_l3l4_hdr *cmd; - const struct i2400m_l3l4_hdr *ack; - size_t ack_len; - const struct i2400m_tlv_hdr *tlv; - const struct i2400m_tlv_l4_message_versions *l4mv; - char strerr[32]; - unsigned major, minor, branch; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->type = cpu_to_le16(I2400M_MT_GET_LM_VERSION); - cmd->length = 0; - cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - if (IS_ERR(ack_skb)) { - result = PTR_ERR(ack_skb); - dev_err(dev, "Failed to issue 'get lm version' command: %-d\n", - result); - goto error_msg_to_dev; - } - ack = wimax_msg_data_len(ack_skb, &ack_len); - result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); - if (result < 0) { - dev_err(dev, "'get lm version' (0x%04x) command failed: " - "%d - %s\n", I2400M_MT_GET_LM_VERSION, result, - strerr); - goto error_cmd_failed; - } - tlv = i2400m_tlv_find(i2400m, ack->pl, ack_len - sizeof(*ack), - I2400M_TLV_L4_MESSAGE_VERSIONS, sizeof(*l4mv)); - if (tlv == NULL) { - dev_err(dev, "get lm version: TLV not found (0x%04x)\n", - I2400M_TLV_L4_MESSAGE_VERSIONS); - result = -EIO; - goto error_no_tlv; - } - l4mv = container_of(tlv, typeof(*l4mv), hdr); - major = le16_to_cpu(l4mv->major); - minor = le16_to_cpu(l4mv->minor); - branch = le16_to_cpu(l4mv->branch); - result = -EINVAL; - if (major != I2400M_HDIv_MAJOR) { - dev_err(dev, "unsupported major fw version " - "%u.%u.%u\n", major, minor, branch); - goto error_bad_major; - } - result = 0; - if (minor > I2400M_HDIv_MINOR_2 || minor < I2400M_HDIv_MINOR) - dev_warn(dev, "untested minor fw version %u.%u.%u\n", - major, minor, branch); - /* Yes, we ignore the branch -- we don't have to track it */ - i2400m->fw_version = major << 16 | minor; - dev_info(dev, "firmware interface version %u.%u.%u\n", - major, minor, branch); -error_bad_major: -error_no_tlv: -error_cmd_failed: - kfree_skb(ack_skb); -error_msg_to_dev: - kfree(cmd); -error_alloc: - return result; -} - - -/* - * Send an DoExitIdle command to the device to ask it to go out of - * basestation-idle mode. - * - * @i2400m: device descriptor - * - * This starts a renegotiation with the basestation that might involve - * another crypto handshake with user space. - * - * Returns: 0 if ok, < 0 errno code on error. - */ -int i2400m_cmd_exit_idle(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_l3l4_hdr *cmd; - char strerr[32]; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->type = cpu_to_le16(I2400M_MT_CMD_EXIT_IDLE); - cmd->length = 0; - cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - result = PTR_ERR(ack_skb); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'exit idle' command: %d\n", - result); - goto error_msg_to_dev; - } - result = i2400m_msg_check_status(wimax_msg_data(ack_skb), - strerr, sizeof(strerr)); - kfree_skb(ack_skb); -error_msg_to_dev: - kfree(cmd); -error_alloc: - return result; - -} - - -/* - * Query the device for its state, update the WiMAX stack's idea of it - * - * @i2400m: device descriptor - * - * Returns: 0 if ok, < 0 errno code on error. - * - * Executes a 'Get State' command and parses the returned - * TLVs. 
- * - * Because this is almost identical to a 'Report State', we use - * i2400m_report_state_hook() to parse the answer. This will set the - * carrier state, as well as the RF Kill switches state. - */ -static int i2400m_cmd_get_state(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_l3l4_hdr *cmd; - const struct i2400m_l3l4_hdr *ack; - size_t ack_len; - char strerr[32]; - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->type = cpu_to_le16(I2400M_MT_GET_STATE); - cmd->length = 0; - cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'get state' command: %ld\n", - PTR_ERR(ack_skb)); - result = PTR_ERR(ack_skb); - goto error_msg_to_dev; - } - ack = wimax_msg_data_len(ack_skb, &ack_len); - result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); - if (result < 0) { - dev_err(dev, "'get state' (0x%04x) command failed: " - "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); - goto error_cmd_failed; - } - i2400m_report_state_hook(i2400m, ack, ack_len - sizeof(*ack), - "GET STATE"); - result = 0; - kfree_skb(ack_skb); -error_cmd_failed: -error_msg_to_dev: - kfree(cmd); -error_alloc: - return result; -} - -/** - * Set basic configuration settings - * - * @i2400m: device descriptor - * @args: array of pointers to the TLV headers to send for - * configuration (each followed by its payload). - * TLV headers and payloads must be properly initialized, with the - * right endianess (LE). - * @arg_size: number of pointers in the @args array - */ -static int i2400m_set_init_config(struct i2400m *i2400m, - const struct i2400m_tlv_hdr **arg, - size_t args) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct i2400m_l3l4_hdr *cmd; - char strerr[32]; - unsigned argc, argsize, tlv_size; - const struct i2400m_tlv_hdr *tlv_hdr; - void *buf, *itr; - - d_fnstart(3, dev, "(i2400m %p arg %p args %zu)\n", i2400m, arg, args); - result = 0; - if (args == 0) - goto none; - /* Compute the size of all the TLVs, so we can alloc a - * contiguous command block to copy them. */ - argsize = 0; - for (argc = 0; argc < args; argc++) { - tlv_hdr = arg[argc]; - argsize += sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); - } - WARN_ON(argc >= 9); /* As per hw spec */ - - /* Alloc the space for the command and TLVs*/ - result = -ENOMEM; - buf = kzalloc(sizeof(*cmd) + argsize, GFP_KERNEL); - if (buf == NULL) - goto error_alloc; - cmd = buf; - cmd->type = cpu_to_le16(I2400M_MT_SET_INIT_CONFIG); - cmd->length = cpu_to_le16(argsize); - cmd->version = cpu_to_le16(I2400M_L3L4_VERSION); - - /* Copy the TLVs */ - itr = buf + sizeof(*cmd); - for (argc = 0; argc < args; argc++) { - tlv_hdr = arg[argc]; - tlv_size = sizeof(*tlv_hdr) + le16_to_cpu(tlv_hdr->length); - memcpy(itr, tlv_hdr, tlv_size); - itr += tlv_size; - } - - /* Send the message! 
*/ - ack_skb = i2400m_msg_to_dev(i2400m, buf, sizeof(*cmd) + argsize); - result = PTR_ERR(ack_skb); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'init config' command: %d\n", - result); - - goto error_msg_to_dev; - } - result = i2400m_msg_check_status(wimax_msg_data(ack_skb), - strerr, sizeof(strerr)); - if (result < 0) - dev_err(dev, "'init config' (0x%04x) command failed: %d - %s\n", - I2400M_MT_SET_INIT_CONFIG, result, strerr); - kfree_skb(ack_skb); -error_msg_to_dev: - kfree(buf); -error_alloc: -none: - d_fnend(3, dev, "(i2400m %p arg %p args %zu) = %d\n", - i2400m, arg, args, result); - return result; - -} - -/** - * i2400m_set_idle_timeout - Set the device's idle mode timeout - * - * @i2400m: i2400m device descriptor - * - * @msecs: milliseconds for the timeout to enter idle mode. Between - * 100 to 300000 (5m); 0 to disable. In increments of 100. - * - * After this @msecs of the link being idle (no data being sent or - * received), the device will negotiate with the basestation entering - * idle mode for saving power. The connection is maintained, but - * getting out of it (done in tx.c) will require some negotiation, - * possible crypto re-handshake and a possible DHCP re-lease. - * - * Only available if fw_version >= 0x00090002. - * - * Returns: 0 if ok, < 0 errno code on error. - */ -int i2400m_set_idle_timeout(struct i2400m *i2400m, unsigned msecs) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct { - struct i2400m_l3l4_hdr hdr; - struct i2400m_tlv_config_idle_timeout cit; - } *cmd; - const struct i2400m_l3l4_hdr *ack; - size_t ack_len; - char strerr[32]; - - result = -ENOSYS; - if (i2400m_le_v1_3(i2400m)) - goto error_alloc; - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->hdr.type = cpu_to_le16(I2400M_MT_GET_STATE); - cmd->hdr.length = cpu_to_le16(sizeof(*cmd) - sizeof(cmd->hdr)); - cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); - - cmd->cit.hdr.type = - cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); - cmd->cit.hdr.length = cpu_to_le16(sizeof(cmd->cit.timeout)); - cmd->cit.timeout = cpu_to_le32(msecs); - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'set idle timeout' command: " - "%ld\n", PTR_ERR(ack_skb)); - result = PTR_ERR(ack_skb); - goto error_msg_to_dev; - } - ack = wimax_msg_data_len(ack_skb, &ack_len); - result = i2400m_msg_check_status(ack, strerr, sizeof(strerr)); - if (result < 0) { - dev_err(dev, "'set idle timeout' (0x%04x) command failed: " - "%d - %s\n", I2400M_MT_GET_STATE, result, strerr); - goto error_cmd_failed; - } - result = 0; - kfree_skb(ack_skb); -error_cmd_failed: -error_msg_to_dev: - kfree(cmd); -error_alloc: - return result; -} - - -/** - * i2400m_dev_initialize - Initialize the device once communications are ready - * - * @i2400m: device descriptor - * - * Returns: 0 if ok, < 0 errno code on error. - * - * Configures the device to work the way we like it. - * - * At the point of this call, the device is registered with the WiMAX - * and netdev stacks, firmware is uploaded and we can talk to the - * device normally. 
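
Given the constraints documented for i2400m_set_idle_timeout() above (0 disables; otherwise 100 to 300000 ms in steps of 100), a hedged caller-side sketch; the wrapper name is invented:

static int set_idle_timeout_checked(struct i2400m *i2400m, unsigned msecs)
{
	if (msecs != 0 &&
	    (msecs < 100 || msecs > 300000 || msecs % 100 != 0))
		return -EINVAL;	/* outside the documented range/step */
	/* the helper itself returns -ENOSYS on firmware that lacks
	 * the command */
	return i2400m_set_idle_timeout(i2400m, msecs);
}
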
- */ -int i2400m_dev_initialize(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_tlv_config_idle_parameters idle_params; - struct i2400m_tlv_config_idle_timeout idle_timeout; - struct i2400m_tlv_config_d2h_data_format df; - struct i2400m_tlv_config_dl_host_reorder dlhr; - const struct i2400m_tlv_hdr *args[9]; - unsigned argc = 0; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - if (i2400m_passive_mode) - goto out_passive; - /* Disable idle mode? (enabled by default) */ - if (i2400m_idle_mode_disabled) { - if (i2400m_le_v1_3(i2400m)) { - idle_params.hdr.type = - cpu_to_le16(I2400M_TLV_CONFIG_IDLE_PARAMETERS); - idle_params.hdr.length = cpu_to_le16( - sizeof(idle_params) - sizeof(idle_params.hdr)); - idle_params.idle_timeout = 0; - idle_params.idle_paging_interval = 0; - args[argc++] = &idle_params.hdr; - } else { - idle_timeout.hdr.type = - cpu_to_le16(I2400M_TLV_CONFIG_IDLE_TIMEOUT); - idle_timeout.hdr.length = cpu_to_le16( - sizeof(idle_timeout) - sizeof(idle_timeout.hdr)); - idle_timeout.timeout = 0; - args[argc++] = &idle_timeout.hdr; - } - } - if (i2400m_ge_v1_4(i2400m)) { - /* Enable extended RX data format? */ - df.hdr.type = - cpu_to_le16(I2400M_TLV_CONFIG_D2H_DATA_FORMAT); - df.hdr.length = cpu_to_le16( - sizeof(df) - sizeof(df.hdr)); - df.format = 1; - args[argc++] = &df.hdr; - - /* Enable RX data reordering? - * (switch flipped in rx.c:i2400m_rx_setup() after fw upload) */ - if (i2400m->rx_reorder) { - dlhr.hdr.type = - cpu_to_le16(I2400M_TLV_CONFIG_DL_HOST_REORDER); - dlhr.hdr.length = cpu_to_le16( - sizeof(dlhr) - sizeof(dlhr.hdr)); - dlhr.reorder = 1; - args[argc++] = &dlhr.hdr; - } - } - result = i2400m_set_init_config(i2400m, args, argc); - if (result < 0) - goto error; -out_passive: - /* - * Update state: Here it just calls a get state; parsing the - * result (System State TLV and RF Status TLV [done in the rx - * path hooks]) will set the hardware and software RF-Kill - * status. - */ - result = i2400m_cmd_get_state(i2400m); -error: - if (result < 0) - dev_err(dev, "failed to initialize the device: %d\n", result); - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; -} - - -/** - * i2400m_dev_shutdown - Shutdown a running device - * - * @i2400m: device descriptor - * - * Release resources acquired during the running of the device; in - * theory, should also tell the device to go to sleep, switch off the - * radio, all that, but at this point, in most cases (driver - * disconnection, reset handling) we can't even talk to the device. 
- */ -void i2400m_dev_shutdown(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} diff --git a/drivers/net/wimax/i2400m/debug-levels.h b/drivers/net/wimax/i2400m/debug-levels.h deleted file mode 100644 index 00942bb1489b..000000000000 --- a/drivers/net/wimax/i2400m/debug-levels.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Intel Wireless WiMAX Connection 2400m - * Debug levels control file for the i2400m module - * - * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - */ -#ifndef __debug_levels__h__ -#define __debug_levels__h__ - -/* Maximum compile and run time debug level for all submodules */ -#define D_MODULENAME i2400m -#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL - -#include <linux/wimax/debug.h> - -/* List of all the enabled modules */ -enum d_module { - D_SUBMODULE_DECLARE(control), - D_SUBMODULE_DECLARE(driver), - D_SUBMODULE_DECLARE(debugfs), - D_SUBMODULE_DECLARE(fw), - D_SUBMODULE_DECLARE(netdev), - D_SUBMODULE_DECLARE(rfkill), - D_SUBMODULE_DECLARE(rx), - D_SUBMODULE_DECLARE(sysfs), - D_SUBMODULE_DECLARE(tx), -}; - - -#endif /* #ifndef __debug_levels__h__ */ diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c deleted file mode 100644 index 1c640b41ea4c..000000000000 --- a/drivers/net/wimax/i2400m/debugfs.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless WiMAX Connection 2400m - * Debugfs interfaces to manipulate driver and device information - * - * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - */ - -#include <linux/debugfs.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/spinlock.h> -#include <linux/device.h> -#include <linux/export.h> -#include "i2400m.h" - - -#define D_SUBMODULE debugfs -#include "debug-levels.h" - -static -int debugfs_netdev_queue_stopped_get(void *data, u64 *val) -{ - struct i2400m *i2400m = data; - *val = netif_queue_stopped(i2400m->wimax_dev.net_dev); - return 0; -} -DEFINE_DEBUGFS_ATTRIBUTE(fops_netdev_queue_stopped, - debugfs_netdev_queue_stopped_get, - NULL, "%llu\n"); - -/* - * We don't allow partial reads of this file, as then the reader would - * get weirdly confused data as it is updated. - * - * So or you read it all or nothing; if you try to read with an offset - * != 0, we consider you are done reading. 
- */ -static -ssize_t i2400m_rx_stats_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i2400m *i2400m = filp->private_data; - char buf[128]; - unsigned long flags; - - if (*ppos != 0) - return 0; - if (count < sizeof(buf)) - return -ENOSPC; - spin_lock_irqsave(&i2400m->rx_lock, flags); - snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n", - i2400m->rx_pl_num, i2400m->rx_pl_min, - i2400m->rx_pl_max, i2400m->rx_num, - i2400m->rx_size_acc, - i2400m->rx_size_min, i2400m->rx_size_max); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); -} - - -/* Any write clears the stats */ -static -ssize_t i2400m_rx_stats_write(struct file *filp, const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i2400m *i2400m = filp->private_data; - unsigned long flags; - - spin_lock_irqsave(&i2400m->rx_lock, flags); - i2400m->rx_pl_num = 0; - i2400m->rx_pl_max = 0; - i2400m->rx_pl_min = UINT_MAX; - i2400m->rx_num = 0; - i2400m->rx_size_acc = 0; - i2400m->rx_size_min = UINT_MAX; - i2400m->rx_size_max = 0; - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - return count; -} - -static -const struct file_operations i2400m_rx_stats_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = i2400m_rx_stats_read, - .write = i2400m_rx_stats_write, - .llseek = default_llseek, -}; - - -/* See i2400m_rx_stats_read() */ -static -ssize_t i2400m_tx_stats_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i2400m *i2400m = filp->private_data; - char buf[128]; - unsigned long flags; - - if (*ppos != 0) - return 0; - if (count < sizeof(buf)) - return -ENOSPC; - spin_lock_irqsave(&i2400m->tx_lock, flags); - snprintf(buf, sizeof(buf), "%u %u %u %u %u %u %u\n", - i2400m->tx_pl_num, i2400m->tx_pl_min, - i2400m->tx_pl_max, i2400m->tx_num, - i2400m->tx_size_acc, - i2400m->tx_size_min, i2400m->tx_size_max); - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); -} - -/* Any write clears the stats */ -static -ssize_t i2400m_tx_stats_write(struct file *filp, const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i2400m *i2400m = filp->private_data; - unsigned long flags; - - spin_lock_irqsave(&i2400m->tx_lock, flags); - i2400m->tx_pl_num = 0; - i2400m->tx_pl_max = 0; - i2400m->tx_pl_min = UINT_MAX; - i2400m->tx_num = 0; - i2400m->tx_size_acc = 0; - i2400m->tx_size_min = UINT_MAX; - i2400m->tx_size_max = 0; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - return count; -} - -static -const struct file_operations i2400m_tx_stats_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = i2400m_tx_stats_read, - .write = i2400m_tx_stats_write, - .llseek = default_llseek, -}; - - -/* Write 1 to ask the device to go into suspend */ -static -int debugfs_i2400m_suspend_set(void *data, u64 val) -{ - int result; - struct i2400m *i2400m = data; - result = i2400m_cmd_enter_powersave(i2400m); - if (result >= 0) - result = 0; - return result; -} -DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_suspend, - NULL, debugfs_i2400m_suspend_set, - "%llu\n"); - -/* - * Reset the device - * - * Write 0 to ask the device to soft reset, 1 to cold reset, 2 to bus - * reset (as defined by enum i2400m_reset_type). 
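
A hypothetical userspace sketch driving the 'reset' file created below; the debugfs path assumes the default mount point and a wimax interface named wmx0, neither of which is taken from this file:

#include <fcntl.h>
#include <unistd.h>

static int request_cold_reset(void)
{
	/* 0 = warm (soft), 1 = cold, 2 = bus reset, per the comment above */
	int fd = open("/sys/kernel/debug/wimax:wmx0/i2400m/reset", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
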
- */ -static -int debugfs_i2400m_reset_set(void *data, u64 val) -{ - int result; - struct i2400m *i2400m = data; - enum i2400m_reset_type rt = val; - switch(rt) { - case I2400M_RT_WARM: - case I2400M_RT_COLD: - case I2400M_RT_BUS: - result = i2400m_reset(i2400m, rt); - if (result >= 0) - result = 0; - break; - default: - result = -EINVAL; - } - return result; -} -DEFINE_DEBUGFS_ATTRIBUTE(fops_i2400m_reset, - NULL, debugfs_i2400m_reset_set, - "%llu\n"); - -void i2400m_debugfs_add(struct i2400m *i2400m) -{ - struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry; - - dentry = debugfs_create_dir("i2400m", dentry); - i2400m->debugfs_dentry = dentry; - - d_level_register_debugfs("dl_", control, dentry); - d_level_register_debugfs("dl_", driver, dentry); - d_level_register_debugfs("dl_", debugfs, dentry); - d_level_register_debugfs("dl_", fw, dentry); - d_level_register_debugfs("dl_", netdev, dentry); - d_level_register_debugfs("dl_", rfkill, dentry); - d_level_register_debugfs("dl_", rx, dentry); - d_level_register_debugfs("dl_", tx, dentry); - - debugfs_create_size_t("tx_in", 0400, dentry, &i2400m->tx_in); - debugfs_create_size_t("tx_out", 0400, dentry, &i2400m->tx_out); - debugfs_create_u32("state", 0600, dentry, &i2400m->state); - - /* - * Trace received messages from user space - * - * In order to tap the bidirectional message stream in the - * 'msg' pipe, user space can read from the 'msg' pipe; - * however, due to limitations in libnl, we can't know what - * the different applications are sending down to the kernel. - * - * So we have this hack where the driver will echo any message - * received on the msg pipe from user space [through a call to - * wimax_dev->op_msg_from_user() into - * i2400m_op_msg_from_user()] into the 'trace' pipe that this - * driver creates. - * - * So then, reading from both the 'trace' and 'msg' pipes in - * user space will provide a full dump of the traffic. - * - * Write 1 to activate, 0 to clear. - * - * It is not really very atomic, but it is also not too - * critical. - */ - debugfs_create_u8("trace_msg_from_user", 0600, dentry, - &i2400m->trace_msg_from_user); - - debugfs_create_file("netdev_queue_stopped", 0400, dentry, i2400m, - &fops_netdev_queue_stopped); - - debugfs_create_file("rx_stats", 0600, dentry, i2400m, - &i2400m_rx_stats_fops); - - debugfs_create_file("tx_stats", 0600, dentry, i2400m, - &i2400m_tx_stats_fops); - - debugfs_create_file("suspend", 0200, dentry, i2400m, - &fops_i2400m_suspend); - - debugfs_create_file("reset", 0200, dentry, i2400m, &fops_i2400m_reset); -} - -void i2400m_debugfs_rm(struct i2400m *i2400m) -{ - debugfs_remove_recursive(i2400m->debugfs_dentry); -} diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c deleted file mode 100644 index ecb3fccca603..000000000000 --- a/drivers/net/wimax/i2400m/driver.c +++ /dev/null @@ -1,1002 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless WiMAX Connection 2400m - * Generic probe/disconnect, reset and message passing - * - * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * See i2400m.h for driver documentation. This contains helpers for - * the driver model glue [_setup()/_release()], handling device resets - * [_dev_reset_handle()], and the backends for the WiMAX stack ops - * reset [_op_reset()] and message from user [_op_msg_from_user()]. 
- *
- * ROADMAP:
- *
- * i2400m_op_msg_from_user()
- *   i2400m_msg_to_dev()
- *   wimax_msg_to_user_send()
- *
- * i2400m_op_reset()
- *   i2400m->bus_reset()
- *
- * i2400m_dev_reset_handle()
- *   __i2400m_dev_reset_handle()
- *     __i2400m_dev_stop()
- *     __i2400m_dev_start()
- *
- * i2400m_setup()
- *   i2400m->bus_setup()
- *   i2400m_bootrom_init()
- *   register_netdev()
- *   wimax_dev_add()
- *   i2400m_dev_start()
- *     __i2400m_dev_start()
- *       i2400m_dev_bootstrap()
- *       i2400m_tx_setup()
- *       i2400m->bus_dev_start()
- *       i2400m_firmware_check()
- *       i2400m_check_mac_addr()
- *
- * i2400m_release()
- *   i2400m_dev_stop()
- *     __i2400m_dev_stop()
- *       i2400m_dev_shutdown()
- *       i2400m->bus_dev_stop()
- *     i2400m_tx_release()
- *   i2400m->bus_release()
- *   wimax_dev_rm()
- *   unregister_netdev()
- */
-#include "i2400m.h"
-#include <linux/etherdevice.h>
-#include <linux/wimax/i2400m.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/suspend.h>
-#include <linux/slab.h>
-
-#define D_SUBMODULE driver
-#include "debug-levels.h"
-
-
-static char i2400m_debug_params[128];
-module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
-		    0644);
-MODULE_PARM_DESC(debug,
-		 "String of space-separated NAME:VALUE pairs, where NAMEs "
-		 "are the different debug submodules and VALUEs are the "
-		 "initial debug values to set.");
-
-static char i2400m_barkers_params[128];
-module_param_string(barkers, i2400m_barkers_params,
-		    sizeof(i2400m_barkers_params), 0644);
-MODULE_PARM_DESC(barkers,
-		 "String of comma-separated 32-bit values; each is "
-		 "recognized as the value the device sends as a reboot "
-		 "signal; values are appended to a list--setting one value "
-		 "as zero cleans the existing list and starts a new one.");
-
-/*
- * WiMAX stack operation: relay a message from user space
- *
- * @wimax_dev: device descriptor
- * @pipe_name: named pipe the message is for
- * @msg_buf: pointer to the message bytes
- * @msg_len: length of the buffer
- * @genl_info: passed by the generic netlink layer
- *
- * The WiMAX stack will call this function when a message was received
- * from user space.
- *
- * For the i2400m, this is an L3L4 message, as specified in
- * include/linux/wimax/i2400m.h, and thus prefixed with a 'struct
- * i2400m_l3l4_hdr'. Driver (and device) expect the messages to be
- * coded in Little Endian.
- *
- * This function just verifies that the header declaration and the
- * payload are consistent and then deals with it, either forwarding it
- * to the device or processing it locally.
- *
- * In the i2400m, messages are basically commands that will carry an
- * ack, so we use i2400m_msg_to_dev() and then deliver the ack back to
- * user space. The rx.c code might intercept the response and use it
- * to update the driver's state, but then it will pass it on so it can
- * be relayed back to user space.
- *
- * Note that asynchronous events from the device are processed and
- * sent to user space in rx.c.
- */ -static -int i2400m_op_msg_from_user(struct wimax_dev *wimax_dev, - const char *pipe_name, - const void *msg_buf, size_t msg_len, - const struct genl_info *genl_info) -{ - int result; - struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - - d_fnstart(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p " - "msg_len %zu genl_info %p)\n", wimax_dev, i2400m, - msg_buf, msg_len, genl_info); - ack_skb = i2400m_msg_to_dev(i2400m, msg_buf, msg_len); - result = PTR_ERR(ack_skb); - if (IS_ERR(ack_skb)) - goto error_msg_to_dev; - result = wimax_msg_send(&i2400m->wimax_dev, ack_skb); -error_msg_to_dev: - d_fnend(4, dev, "(wimax_dev %p [i2400m %p] msg_buf %p msg_len %zu " - "genl_info %p) = %d\n", wimax_dev, i2400m, msg_buf, msg_len, - genl_info, result); - return result; -} - - -/* - * Context to wait for a reset to finalize - */ -struct i2400m_reset_ctx { - struct completion completion; - int result; -}; - - -/* - * WiMAX stack operation: reset a device - * - * @wimax_dev: device descriptor - * - * See the documentation for wimax_reset() and wimax_dev->op_reset for - * the requirements of this function. The WiMAX stack guarantees - * serialization on calls to this function. - * - * Do a warm reset on the device; if it fails, resort to a cold reset - * and return -ENODEV. On successful warm reset, we need to block - * until it is complete. - * - * The bus-driver implementation of reset takes care of falling back - * to cold reset if warm fails. - */ -static -int i2400m_op_reset(struct wimax_dev *wimax_dev) -{ - int result; - struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); - struct device *dev = i2400m_dev(i2400m); - struct i2400m_reset_ctx ctx = { - .completion = COMPLETION_INITIALIZER_ONSTACK(ctx.completion), - .result = 0, - }; - - d_fnstart(4, dev, "(wimax_dev %p)\n", wimax_dev); - mutex_lock(&i2400m->init_mutex); - i2400m->reset_ctx = &ctx; - mutex_unlock(&i2400m->init_mutex); - result = i2400m_reset(i2400m, I2400M_RT_WARM); - if (result < 0) - goto out; - result = wait_for_completion_timeout(&ctx.completion, 4*HZ); - if (result == 0) - result = -ETIMEDOUT; - else if (result > 0) - result = ctx.result; - /* if result < 0, pass it on */ - mutex_lock(&i2400m->init_mutex); - i2400m->reset_ctx = NULL; - mutex_unlock(&i2400m->init_mutex); -out: - d_fnend(4, dev, "(wimax_dev %p) = %d\n", wimax_dev, result); - return result; -} - - -/* - * Check the MAC address we got from boot mode is ok - * - * @i2400m: device descriptor - * - * Returns: 0 if ok, < 0 errno code on error. 
- */ -static -int i2400m_check_mac_addr(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *skb; - const struct i2400m_tlv_detailed_device_info *ddi; - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - skb = i2400m_get_device_info(i2400m); - if (IS_ERR(skb)) { - result = PTR_ERR(skb); - dev_err(dev, "Cannot verify MAC address, error reading: %d\n", - result); - goto error; - } - /* Extract MAC address */ - ddi = (void *) skb->data; - BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address)); - d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n", - ddi->mac_address); - if (!memcmp(net_dev->perm_addr, ddi->mac_address, - sizeof(ddi->mac_address))) - goto ok; - dev_warn(dev, "warning: device reports a different MAC address " - "to that of boot mode's\n"); - dev_warn(dev, "device reports %pM\n", ddi->mac_address); - dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr); - if (is_zero_ether_addr(ddi->mac_address)) - dev_err(dev, "device reports an invalid MAC address, " - "not updating\n"); - else { - dev_warn(dev, "updating MAC address\n"); - net_dev->addr_len = ETH_ALEN; - memcpy(net_dev->perm_addr, ddi->mac_address, ETH_ALEN); - memcpy(net_dev->dev_addr, ddi->mac_address, ETH_ALEN); - } -ok: - result = 0; - kfree_skb(skb); -error: - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; -} - - -/** - * __i2400m_dev_start - Bring up driver communication with the device - * - * @i2400m: device descriptor - * @flags: boot mode flags - * - * Returns: 0 if ok, < 0 errno code on error. - * - * Uploads firmware and brings up all the resources needed to be able - * to communicate with the device. - * - * The workqueue has to be setup early, at least before RX handling - * (it's only real user for now) so it can process reports as they - * arrive. We also want to destroy it if we retry, to make sure it is - * flushed...easier like this. - * - * TX needs to be setup before the bus-specific code (otherwise on - * shutdown, the bus-tx code could try to access it). - */ -static -int __i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri flags) -{ - int result; - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct net_device *net_dev = wimax_dev->net_dev; - struct device *dev = i2400m_dev(i2400m); - int times = i2400m->bus_bm_retries; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); -retry: - result = i2400m_dev_bootstrap(i2400m, flags); - if (result < 0) { - dev_err(dev, "cannot bootstrap device: %d\n", result); - goto error_bootstrap; - } - result = i2400m_tx_setup(i2400m); - if (result < 0) - goto error_tx_setup; - result = i2400m_rx_setup(i2400m); - if (result < 0) - goto error_rx_setup; - i2400m->work_queue = create_singlethread_workqueue(wimax_dev->name); - if (i2400m->work_queue == NULL) { - result = -ENOMEM; - dev_err(dev, "cannot create workqueue\n"); - goto error_create_workqueue; - } - if (i2400m->bus_dev_start) { - result = i2400m->bus_dev_start(i2400m); - if (result < 0) - goto error_bus_dev_start; - } - i2400m->ready = 1; - wmb(); /* see i2400m->ready's documentation */ - /* process pending reports from the device */ - queue_work(i2400m->work_queue, &i2400m->rx_report_ws); - result = i2400m_firmware_check(i2400m); /* fw versions ok? 
*/ - if (result < 0) - goto error_fw_check; - /* At this point is ok to send commands to the device */ - result = i2400m_check_mac_addr(i2400m); - if (result < 0) - goto error_check_mac_addr; - result = i2400m_dev_initialize(i2400m); - if (result < 0) - goto error_dev_initialize; - - /* We don't want any additional unwanted error recovery triggered - * from any other context so if anything went wrong before we come - * here, let's keep i2400m->error_recovery untouched and leave it to - * dev_reset_handle(). See dev_reset_handle(). */ - - atomic_dec(&i2400m->error_recovery); - /* Every thing works so far, ok, now we are ready to - * take error recovery if it's required. */ - - /* At this point, reports will come for the device and set it - * to the right state if it is different than UNINITIALIZED */ - d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", - net_dev, i2400m, result); - return result; - -error_dev_initialize: -error_check_mac_addr: -error_fw_check: - i2400m->ready = 0; - wmb(); /* see i2400m->ready's documentation */ - flush_workqueue(i2400m->work_queue); - if (i2400m->bus_dev_stop) - i2400m->bus_dev_stop(i2400m); -error_bus_dev_start: - destroy_workqueue(i2400m->work_queue); -error_create_workqueue: - i2400m_rx_release(i2400m); -error_rx_setup: - i2400m_tx_release(i2400m); -error_tx_setup: -error_bootstrap: - if (result == -EL3RST && times-- > 0) { - flags = I2400M_BRI_SOFT|I2400M_BRI_MAC_REINIT; - goto retry; - } - d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", - net_dev, i2400m, result); - return result; -} - - -static -int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags) -{ - int result = 0; - mutex_lock(&i2400m->init_mutex); /* Well, start the device */ - if (i2400m->updown == 0) { - result = __i2400m_dev_start(i2400m, bm_flags); - if (result >= 0) { - i2400m->updown = 1; - i2400m->alive = 1; - wmb();/* see i2400m->updown and i2400m->alive's doc */ - } - } - mutex_unlock(&i2400m->init_mutex); - return result; -} - - -/** - * i2400m_dev_stop - Tear down driver communication with the device - * - * @i2400m: device descriptor - * - * Returns: 0 if ok, < 0 errno code on error. - * - * Releases all the resources allocated to communicate with the - * device. Note we cannot destroy the workqueue earlier as until RX is - * fully destroyed, it could still try to schedule jobs. - */ -static -void __i2400m_dev_stop(struct i2400m *i2400m) -{ - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); - i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); - complete(&i2400m->msg_completion); - i2400m_net_wake_stop(i2400m); - i2400m_dev_shutdown(i2400m); - /* - * Make sure no report hooks are running *before* we stop the - * communication infrastructure with the device. - */ - i2400m->ready = 0; /* nobody can queue work anymore */ - wmb(); /* see i2400m->ready's documentation */ - flush_workqueue(i2400m->work_queue); - - if (i2400m->bus_dev_stop) - i2400m->bus_dev_stop(i2400m); - destroy_workqueue(i2400m->work_queue); - i2400m_rx_release(i2400m); - i2400m_tx_release(i2400m); - wimax_state_change(wimax_dev, WIMAX_ST_DOWN); - d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m); -} - - -/* - * Watch out -- we only need to stop if there is a need for it. The - * device could have reset itself and failed to come up again (see - * _i2400m_dev_reset_handle()). 
- */
-static
-void i2400m_dev_stop(struct i2400m *i2400m)
-{
-	mutex_lock(&i2400m->init_mutex);
-	if (i2400m->updown) {
-		__i2400m_dev_stop(i2400m);
-		i2400m->updown = 0;
-		i2400m->alive = 0;
-		wmb();	/* see i2400m->updown and i2400m->alive's doc */
-	}
-	mutex_unlock(&i2400m->init_mutex);
-}
-
-
-/*
- * Listen to PM events to cache the firmware before suspend/hibernation
- *
- * When the device comes out of suspend, it might go into reset and
- * firmware has to be uploaded again. At resume, most of the time, we
- * can't load firmware images from disk, so we need to cache it.
- *
- * i2400m_fw_cache() will allocate a kobject and attach the firmware
- * to it; that way we don't have to worry too much about the fw loader
- * hitting a race condition.
- *
- * Note: modus operandi stolen from the Orinoco driver; thx.
- */
-static
-int i2400m_pm_notifier(struct notifier_block *notifier,
-		       unsigned long pm_event,
-		       void *unused)
-{
-	struct i2400m *i2400m =
-		container_of(notifier, struct i2400m, pm_notifier);
-	struct device *dev = i2400m_dev(i2400m);
-
-	d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event);
-	switch (pm_event) {
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		i2400m_fw_cache(i2400m);
-		break;
-	case PM_POST_RESTORE:
-		/* Restore from hibernation failed. We need to clean
-		 * up in exactly the same way, so fall through. */
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		i2400m_fw_uncache(i2400m);
-		break;
-
-	case PM_RESTORE_PREPARE:
-	default:
-		break;
-	}
-	d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event);
-	return NOTIFY_DONE;
-}
-
-
-/*
- * pre-reset is called before a device goes into reset
- *
- * This has to be followed by a call to i2400m_post_reset(), otherwise
- * bad things might happen.
- */
-int i2400m_pre_reset(struct i2400m *i2400m)
-{
-	struct device *dev = i2400m_dev(i2400m);
-
-	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-	d_printf(1, dev, "pre-reset shut down\n");
-
-	mutex_lock(&i2400m->init_mutex);
-	if (i2400m->updown) {
-		netif_tx_disable(i2400m->wimax_dev.net_dev);
-		__i2400m_dev_stop(i2400m);
-		/* don't set updown to zero -- this way
-		 * post_reset can restore properly */
-	}
-	mutex_unlock(&i2400m->init_mutex);
-	if (i2400m->bus_release)
-		i2400m->bus_release(i2400m);
-	d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(i2400m_pre_reset);
-
-
-/*
- * Restore device state after a reset
- *
- * Do the work needed after a device reset to bring it up to the same
- * state as it was before the reset.
- *
- * NOTE: this requires i2400m->init_mutex taken
- */
-int i2400m_post_reset(struct i2400m *i2400m)
-{
-	int result = 0;
-	struct device *dev = i2400m_dev(i2400m);
-
-	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
-	d_printf(1, dev, "post-reset start\n");
-	if (i2400m->bus_setup) {
-		result = i2400m->bus_setup(i2400m);
-		if (result < 0) {
-			dev_err(dev, "bus-specific setup failed: %d\n",
-				result);
-			goto error_bus_setup;
-		}
-	}
-	mutex_lock(&i2400m->init_mutex);
-	if (i2400m->updown) {
-		result = __i2400m_dev_start(
-			i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
-		if (result < 0)
-			goto error_dev_start;
-	}
-	mutex_unlock(&i2400m->init_mutex);
-	d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
-	return result;
-
-error_dev_start:
-	if (i2400m->bus_release)
-		i2400m->bus_release(i2400m);
-	/* even if the device was up, it could not be recovered, so we
-	 * mark it as down. 
*/ - i2400m->updown = 0; - wmb(); /* see i2400m->updown's documentation */ - mutex_unlock(&i2400m->init_mutex); -error_bus_setup: - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; -} -EXPORT_SYMBOL_GPL(i2400m_post_reset); - - -/* - * The device has rebooted; fix up the device and the driver - * - * Tear down the driver communication with the device, reload the - * firmware and reinitialize the communication with the device. - * - * If someone calls a reset when the device's firmware is down, in - * theory we won't see it because we are not listening. However, just - * in case, leave the code to handle it. - * - * If there is a reset context, use it; this means someone is waiting - * for us to tell him when the reset operation is complete and the - * device is ready to rock again. - * - * NOTE: if we are in the process of bringing up or down the - * communication with the device [running i2400m_dev_start() or - * _stop()], don't do anything, let it fail and handle it. - * - * This function is ran always in a thread context - * - * This function gets passed, as payload to i2400m_work() a 'const - * char *' ptr with a "reason" why the reset happened (for messages). - */ -static -void __i2400m_dev_reset_handle(struct work_struct *ws) -{ - struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws); - const char *reason = i2400m->reset_reason; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_reset_ctx *ctx = i2400m->reset_ctx; - int result; - - d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason); - - i2400m->boot_mode = 1; - wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */ - - result = 0; - if (mutex_trylock(&i2400m->init_mutex) == 0) { - /* We are still in i2400m_dev_start() [let it fail] or - * i2400m_dev_stop() [we are shutting down anyway, so - * ignore it] or we are resetting somewhere else. */ - dev_err(dev, "device rebooted somewhere else?\n"); - i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); - complete(&i2400m->msg_completion); - goto out; - } - - dev_err(dev, "%s: reinitializing driver\n", reason); - rmb(); - if (i2400m->updown) { - __i2400m_dev_stop(i2400m); - i2400m->updown = 0; - wmb(); /* see i2400m->updown's documentation */ - } - - if (i2400m->alive) { - result = __i2400m_dev_start(i2400m, - I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); - if (result < 0) { - dev_err(dev, "%s: cannot start the device: %d\n", - reason, result); - result = -EUCLEAN; - if (atomic_read(&i2400m->bus_reset_retries) - >= I2400M_BUS_RESET_RETRIES) { - result = -ENODEV; - dev_err(dev, "tried too many times to " - "reset the device, giving up\n"); - } - } - } - - if (i2400m->reset_ctx) { - ctx->result = result; - complete(&ctx->completion); - } - mutex_unlock(&i2400m->init_mutex); - if (result == -EUCLEAN) { - /* - * We come here because the reset during operational mode - * wasn't successfully done and need to proceed to a bus - * reset. For the dev_reset_handle() to be able to handle - * the reset event later properly, we restore boot_mode back - * to the state before previous reset. 
ie: just like we are
- * issuing the bus reset for the first time
- */
-		i2400m->boot_mode = 0;
-		wmb();
-
-		atomic_inc(&i2400m->bus_reset_retries);
-		/* oops, need to clean up [w/ init_mutex not held] */
-		result = i2400m_reset(i2400m, I2400M_RT_BUS);
-		if (result >= 0)
-			result = -ENODEV;
-	} else {
-		rmb();
-		if (i2400m->alive) {
-			/* great, we expect the device state up and
-			 * dev_start() actually brings the device state up */
-			i2400m->updown = 1;
-			wmb();
-			atomic_set(&i2400m->bus_reset_retries, 0);
-		}
-	}
-out:
-	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
-		ws, i2400m, reason);
-}
-
-
-/**
- * i2400m_dev_reset_handle - Handle a device's reset in a thread context
- *
- * Schedule device reset handling on a thread context, so it
- * is safe to call from atomic context. We can't use the i2400m's
- * queue as we are going to destroy it and reinitialize it as part of
- * the driver teardown/bringup process.
- *
- * See __i2400m_dev_reset_handle() for details; that takes care of
- * reinitializing the driver to handle the reset, calling into the
- * bus-specific function ops as needed.
- */
-int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
-{
-	i2400m->reset_reason = reason;
-	return schedule_work(&i2400m->reset_ws);
-}
-EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
-
-
-/*
- * The actual work of error recovery.
- *
- * The current implementation of error recovery is to trigger a bus reset.
- */
-static
-void __i2400m_error_recovery(struct work_struct *ws)
-{
-	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
-
-	i2400m_reset(i2400m, I2400M_RT_BUS);
-}
-
-/*
- * Schedule a work struct for error recovery.
- *
- * The intention of error recovery is to bring back the device to some
- * known state whenever TX sees -110 (-ETIMEDOUT) on copying the data to
- * the device. The TX failure could mean the device bus is stuck, so the
- * current error recovery implementation is to trigger a bus reset to the
- * device and hopefully it can bring back the device.
- *
- * The actual work of error recovery has to be in a thread context because
- * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
- * destroyed by the error recovery mechanism (currently a bus reset).
- *
- * Also, there may already be a queue of TX works that all hit
- * the -ETIMEDOUT error condition because the device is stuck already.
- * Since bus reset is used as the error recovery mechanism and we don't
- * want consecutive bus resets simply because the multiple TX works
- * in the queue all hit the same device erratum, the flag "error_recovery"
- * is introduced for preventing unwanted consecutive bus resets.
- *
- * Error recovery shall only be invoked again if the previous one has
- * completed. The flag error_recovery is set when error recovery is
- * scheduled, and is checked when we need to schedule another error
- * recovery. If it is in place already, then we shouldn't schedule
- * another one.
- */
-void i2400m_error_recovery(struct i2400m *i2400m)
-{
-	if (atomic_add_return(1, &i2400m->error_recovery) == 1)
-		schedule_work(&i2400m->recovery_ws);
-	else
-		atomic_dec(&i2400m->error_recovery);
-}
-EXPORT_SYMBOL_GPL(i2400m_error_recovery);
-
-/*
- * Alloc the command and ack buffers for boot mode
- *
- * Get the buffers needed to deal with boot mode messages.
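
The error_recovery counter described above is effectively a three-phase gate; a condensed view of its lifecycle (the calls named are the ones in this file):

/* i2400m_init():        counter starts at 1, "not ready", so a TX
 *                       timeout during bringup sees add_return() == 2
 *                       and is ignored;
 * __i2400m_dev_start(): atomic_dec() arms the gate at 0;
 * first real failure:   add_return() == 1, schedule the bus reset. */
if (atomic_add_return(1, &i2400m->error_recovery) == 1)
	schedule_work(&i2400m->recovery_ws);	/* first requester fires */
else
	atomic_dec(&i2400m->error_recovery);	/* one is already pending */
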
- */ -static -int i2400m_bm_buf_alloc(struct i2400m *i2400m) -{ - i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL); - if (i2400m->bm_cmd_buf == NULL) - goto error_bm_cmd_kzalloc; - i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL); - if (i2400m->bm_ack_buf == NULL) - goto error_bm_ack_buf_kzalloc; - return 0; - -error_bm_ack_buf_kzalloc: - kfree(i2400m->bm_cmd_buf); -error_bm_cmd_kzalloc: - return -ENOMEM; -} - - -/* - * Free boot mode command and ack buffers. - */ -static -void i2400m_bm_buf_free(struct i2400m *i2400m) -{ - kfree(i2400m->bm_ack_buf); - kfree(i2400m->bm_cmd_buf); -} - - -/** - * i2400m_init - Initialize a 'struct i2400m' from all zeroes - * - * This is a bus-generic API call. - */ -void i2400m_init(struct i2400m *i2400m) -{ - wimax_dev_init(&i2400m->wimax_dev); - - i2400m->boot_mode = 1; - i2400m->rx_reorder = 1; - init_waitqueue_head(&i2400m->state_wq); - - spin_lock_init(&i2400m->tx_lock); - i2400m->tx_pl_min = UINT_MAX; - i2400m->tx_size_min = UINT_MAX; - - spin_lock_init(&i2400m->rx_lock); - i2400m->rx_pl_min = UINT_MAX; - i2400m->rx_size_min = UINT_MAX; - INIT_LIST_HEAD(&i2400m->rx_reports); - INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work); - - mutex_init(&i2400m->msg_mutex); - init_completion(&i2400m->msg_completion); - - mutex_init(&i2400m->init_mutex); - /* wake_tx_ws is initialized in i2400m_tx_setup() */ - - INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle); - INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery); - - atomic_set(&i2400m->bus_reset_retries, 0); - - i2400m->alive = 0; - - /* initialize error_recovery to 1 for denoting we - * are not yet ready to take any error recovery */ - atomic_set(&i2400m->error_recovery, 1); -} -EXPORT_SYMBOL_GPL(i2400m_init); - - -int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) -{ - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - - /* - * Make sure we stop TXs and down the carrier before - * resetting; this is needed to avoid things like - * i2400m_wake_tx() scheduling stuff in parallel. - */ - if (net_dev->reg_state == NETREG_REGISTERED) { - netif_tx_disable(net_dev); - netif_carrier_off(net_dev); - } - return i2400m->bus_reset(i2400m, rt); -} -EXPORT_SYMBOL_GPL(i2400m_reset); - - -/** - * i2400m_setup - bus-generic setup function for the i2400m device - * - * @i2400m: device descriptor (bus-specific parts have been initialized) - * - * Returns: 0 if ok, < 0 errno code on error. - * - * Sets up basic device comunication infrastructure, boots the ROM to - * read the MAC address, registers with the WiMAX and network stacks - * and then brings up the device. 
- */ -int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - - snprintf(wimax_dev->name, sizeof(wimax_dev->name), - "i2400m-%s:%s", dev->bus->name, dev_name(dev)); - - result = i2400m_bm_buf_alloc(i2400m); - if (result < 0) { - dev_err(dev, "cannot allocate bootmode scratch buffers\n"); - goto error_bm_buf_alloc; - } - - if (i2400m->bus_setup) { - result = i2400m->bus_setup(i2400m); - if (result < 0) { - dev_err(dev, "bus-specific setup failed: %d\n", - result); - goto error_bus_setup; - } - } - - result = i2400m_bootrom_init(i2400m, bm_flags); - if (result < 0) { - dev_err(dev, "read mac addr: bootrom init " - "failed: %d\n", result); - goto error_bootrom_init; - } - result = i2400m_read_mac_addr(i2400m); - if (result < 0) - goto error_read_mac_addr; - eth_random_addr(i2400m->src_mac_addr); - - i2400m->pm_notifier.notifier_call = i2400m_pm_notifier; - register_pm_notifier(&i2400m->pm_notifier); - - result = register_netdev(net_dev); /* Okey dokey, bring it up */ - if (result < 0) { - dev_err(dev, "cannot register i2400m network device: %d\n", - result); - goto error_register_netdev; - } - netif_carrier_off(net_dev); - - i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user; - i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle; - i2400m->wimax_dev.op_reset = i2400m_op_reset; - - result = wimax_dev_add(&i2400m->wimax_dev, net_dev); - if (result < 0) - goto error_wimax_dev_add; - - /* Now setup all that requires a registered net and wimax device. */ - result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group); - if (result < 0) { - dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result); - goto error_sysfs_setup; - } - - i2400m_debugfs_add(i2400m); - - result = i2400m_dev_start(i2400m, bm_flags); - if (result < 0) - goto error_dev_start; - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; - -error_dev_start: - i2400m_debugfs_rm(i2400m); - sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, - &i2400m_dev_attr_group); -error_sysfs_setup: - wimax_dev_rm(&i2400m->wimax_dev); -error_wimax_dev_add: - unregister_netdev(net_dev); -error_register_netdev: - unregister_pm_notifier(&i2400m->pm_notifier); -error_read_mac_addr: -error_bootrom_init: - if (i2400m->bus_release) - i2400m->bus_release(i2400m); -error_bus_setup: - i2400m_bm_buf_free(i2400m); -error_bm_buf_alloc: - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; -} -EXPORT_SYMBOL_GPL(i2400m_setup); - - -/** - * i2400m_release - release the bus-generic driver resources - * - * Sends a disconnect message and undoes any setup done by i2400m_setup() - */ -void i2400m_release(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - netif_stop_queue(i2400m->wimax_dev.net_dev); - - i2400m_dev_stop(i2400m); - - cancel_work_sync(&i2400m->reset_ws); - cancel_work_sync(&i2400m->recovery_ws); - - i2400m_debugfs_rm(i2400m); - sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, - &i2400m_dev_attr_group); - wimax_dev_rm(&i2400m->wimax_dev); - unregister_netdev(i2400m->wimax_dev.net_dev); - unregister_pm_notifier(&i2400m->pm_notifier); - if (i2400m->bus_release) - i2400m->bus_release(i2400m); - i2400m_bm_buf_free(i2400m); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} 
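The long error ladder in i2400m_setup() above is the kernel's standard goto-unwind idiom; a minimal sketch of the shape, with hypothetical demo_* steps standing in for buffer allocation, registration and device start:

#include <linux/errno.h>

struct demo_dev { int unused; };

/* Hypothetical acquire/release pairs, stubbed for illustration */
static int demo_alloc(struct demo_dev *dd) { return 0; }
static void demo_free(struct demo_dev *dd) { }
static int demo_register(struct demo_dev *dd) { return 0; }
static void demo_unregister(struct demo_dev *dd) { }
static int demo_start(struct demo_dev *dd) { return 0; }

static int demo_setup(struct demo_dev *dd)
{
	int result;

	result = demo_alloc(dd);
	if (result < 0)
		goto error_alloc;
	result = demo_register(dd);
	if (result < 0)
		goto error_register;
	result = demo_start(dd);
	if (result < 0)
		goto error_start;
	return 0;

	/* Labels sit in reverse acquisition order, so failing at step N
	 * releases exactly steps N-1..1 and nothing else. */
error_start:
	demo_unregister(dd);
error_register:
	demo_free(dd);
error_alloc:
	return result;
}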
-EXPORT_SYMBOL_GPL(i2400m_release); - - -/* - * Debug levels control; see debug.h - */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(control), - D_SUBMODULE_DEFINE(driver), - D_SUBMODULE_DEFINE(debugfs), - D_SUBMODULE_DEFINE(fw), - D_SUBMODULE_DEFINE(netdev), - D_SUBMODULE_DEFINE(rfkill), - D_SUBMODULE_DEFINE(rx), - D_SUBMODULE_DEFINE(sysfs), - D_SUBMODULE_DEFINE(tx), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); - - -static -int __init i2400m_driver_init(void) -{ - d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params, - "i2400m.debug"); - return i2400m_barker_db_init(i2400m_barkers_params); -} -module_init(i2400m_driver_init); - -static -void __exit i2400m_driver_exit(void) -{ - i2400m_barker_db_exit(); -} -module_exit(i2400m_driver_exit); - -MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); -MODULE_DESCRIPTION("Intel 2400M WiMAX networking bus-generic driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c deleted file mode 100644 index 6c9a41bff2e0..000000000000 --- a/drivers/net/wimax/i2400m/fw.c +++ /dev/null @@ -1,1653 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * Firmware uploader - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Initial implementation - * - * - * THE PROCEDURE - * - * The 2400m and derived devices work in two modes: boot-mode or - * normal mode. In boot mode we can execute only a handful of commands - * targeted at uploading the firmware and launching it. - * - * The 2400m enters boot mode when it is first connected to the - * system, when it crashes and when you ask it to reboot. There are - * two submodes of the boot mode: signed and non-signed. Signed takes - * firmwares signed with a certain private key, non-signed takes any - * firmware. Normal hardware takes only signed firmware. 
- * - * In boot mode, in USB, we write to the device using the bulk out - * endpoint and read from it in the notification endpoint. - * - * Upon entrance to boot mode, the device sends (preceded with a few - * zero length packets (ZLPs) on the notification endpoint in USB) a - * reboot barker (4 le32 words with the same value). We ack it by - * sending the same barker to the device. The device acks with a - * reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and - * then is fully booted. At this point we can upload the firmware. - * - * Note that different iterations of the device and EEPROM - * configurations will send different [re]boot barkers; these are - * collected in i2400m_barker_db along with the firmware - * characteristics they require. - * - * This process is accomplished by the i2400m_bootrom_init() - * function. All the device interaction happens through the - * i2400m_bm_cmd() [boot mode command]. Special return values will - * indicate if the device did reset during the process. - * - * After this, we read the MAC address and then (if needed) - * reinitialize the device. We need to read it ahead of time because - * in the future, we might not upload the firmware until userspace - * 'ifconfig up's the device. - * - * We can then upload the firmware file. The file is composed of a BCF - * header (basic data, keys and signatures) and a list of write - * commands and payloads. Optionally more BCF headers might follow the - * main payload. We first upload the header [i2400m_dnload_init()] and - * then pass the commands and payloads verbatim to the i2400m_bm_cmd() - * function [i2400m_dnload_bcf()]. Then we tell the device to jump to - * the new firmware [i2400m_dnload_finalize()]. - * - * Once firmware is uploaded, we are good to go :) - * - * When we don't know in which mode we are, we first try by sending a - * warm reset request that will take us to boot-mode. If we time out - * waiting for a reboot barker, that means maybe we are already in - * boot mode, so we send a reboot barker. - * - * COMMAND EXECUTION - * - * This code (and process) is single threaded; for executing commands, - * we post a URB to the notification endpoint, post the command, then wait - * for data on the notification buffer. We don't need to worry about - * others as we know we are the only ones in there. - * - * BACKEND IMPLEMENTATION - * - * This code is bus-generic; the bus-specific driver provides back end - * implementations to send a boot mode command to the device and to - * read an acknowledgement (or an asynchronous notification) - * from it. - * - * FIRMWARE LOADING - * - * Note that in some cases, we can't just load a firmware file (for - * example, when resuming). For that, we might cache the firmware - * file. Thus, when doing the bootstrap, if there is a cached firmware - * file, it is used; if not, loading from disk is attempted.
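As a side note to the handshake just described: a reboot barker is simply four identical little-endian words. A minimal recognition sketch under that assumption (the real driver instead matches against the i2400m_barker_db database, see i2400m_is_boot_barker() further down):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Check whether a 16-byte buffer is four copies of a known barker value */
static bool demo_is_reboot_barker(const __le32 buf[4], u32 known_barker)
{
	int i;

	for (i = 0; i < 4; i++)
		if (le32_to_cpu(buf[i]) != known_barker)
			return false;
	return true;
}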
- * - * ROADMAP - * - * i2400m_barker_db_init Called by i2400m_driver_init() - * i2400m_barker_db_add - * - * i2400m_barker_db_exit Called by i2400m_driver_exit() - * - * i2400m_dev_bootstrap Called by __i2400m_dev_start() - * request_firmware - * i2400m_fw_bootstrap - * i2400m_fw_check - * i2400m_fw_hdr_check - * i2400m_fw_dnload - * release_firmware - * - * i2400m_fw_dnload - * i2400m_bootrom_init - * i2400m_bm_cmd - * i2400m_reset - * i2400m_dnload_init - * i2400m_dnload_init_signed - * i2400m_dnload_init_nonsigned - * i2400m_download_chunk - * i2400m_bm_cmd - * i2400m_dnload_bcf - * i2400m_bm_cmd - * i2400m_dnload_finalize - * i2400m_bm_cmd - * - * i2400m_bm_cmd - * i2400m->bus_bm_cmd_send() - * i2400m->bus_bm_wait_for_ack - * __i2400m_bm_ack_verify - * i2400m_is_boot_barker - * - * i2400m_bm_cmd_prepare Used by bus-drivers to prep - * commands before sending - * - * i2400m_pm_notifier Called on Power Management events - * i2400m_fw_cache - * i2400m_fw_uncache - */ -#include <linux/firmware.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/usb.h> -#include <linux/export.h> -#include "i2400m.h" - - -#define D_SUBMODULE fw -#include "debug-levels.h" - - -static const __le32 i2400m_ACK_BARKER[4] = { - cpu_to_le32(I2400M_ACK_BARKER), - cpu_to_le32(I2400M_ACK_BARKER), - cpu_to_le32(I2400M_ACK_BARKER), - cpu_to_le32(I2400M_ACK_BARKER) -}; - - -/** - * Prepare a boot-mode command for delivery - * - * @cmd: pointer to bootrom header to prepare - * - * Computes the checksum if needed. After calling this function, DO NOT - * modify the command or header as the checksum won't work anymore. - * - * We do it from here because sometimes we cannot do it in the - * original context in which the command was created (it is a const), so when we - * copy it to our staging buffer, we add the checksum there. - */ -void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *cmd) -{ - if (i2400m_brh_get_use_checksum(cmd)) { - int i; - u32 checksum = 0; - const u32 *checksum_ptr = (void *) cmd->payload; - for (i = 0; i < cmd->data_size / 4; i++) - checksum += cpu_to_le32(*checksum_ptr++); - checksum += cmd->command + cmd->target_addr + cmd->data_size; - cmd->block_checksum = cpu_to_le32(checksum); - } -} -EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare); - - -/* - * Database of known barkers. - * - * A barker is what the device sends indicating it is ready to be - * bootloaded. Different versions of the device will send different - * barkers. Depending on the barker, it might mean the device wants - * one kind of firmware or another. - */ -static struct i2400m_barker_db { - __le32 data[4]; -} *i2400m_barker_db; -static size_t i2400m_barker_db_used, i2400m_barker_db_size; - - -static -int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size, - gfp_t gfp_flags) -{ - size_t old_count = *_count, - new_count = old_count ? 2 * old_count : 2, - old_size = el_size * old_count, - new_size = el_size * new_count; - void *nptr = krealloc(*ptr, new_size, gfp_flags); - if (nptr) { - /* zero the other half or the whole thing if old_count - * was zero */ - if (old_size == 0) - memset(nptr, 0, new_size); - else - memset(nptr + old_size, 0, old_size); - *_count = new_count; - *ptr = nptr; - return 0; - } else - return -ENOMEM; -} - - -/* - * Add a barker to the database - * - * This cannot be used outside of this module and only at module_init - * time. This is to avoid the need to do locking.
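The additive checksum computed by i2400m_bm_cmd_prepare() above boils down to summing the payload as 32-bit words plus three header fields; a host-endian sketch that deliberately leaves out the le32 conversions the driver has to perform on the wire format:

#include <linux/types.h>

/* Illustrative only: sum payload words and header fields modulo 2^32 */
static u32 demo_block_checksum(const u32 *payload, size_t n_words,
			       u32 command, u32 target_addr, u32 data_size)
{
	u32 checksum = 0;
	size_t i;

	for (i = 0; i < n_words; i++)
		checksum += payload[i];
	return checksum + command + target_addr + data_size;
}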
- */ -static -int i2400m_barker_db_add(u32 barker_id) -{ - int result; - - struct i2400m_barker_db *barker; - if (i2400m_barker_db_used >= i2400m_barker_db_size) { - result = i2400m_zrealloc_2x( - (void **) &i2400m_barker_db, &i2400m_barker_db_size, - sizeof(i2400m_barker_db[0]), GFP_KERNEL); - if (result < 0) - return result; - } - barker = i2400m_barker_db + i2400m_barker_db_used++; - barker->data[0] = le32_to_cpu(barker_id); - barker->data[1] = le32_to_cpu(barker_id); - barker->data[2] = le32_to_cpu(barker_id); - barker->data[3] = le32_to_cpu(barker_id); - return 0; -} - - -void i2400m_barker_db_exit(void) -{ - kfree(i2400m_barker_db); - i2400m_barker_db = NULL; - i2400m_barker_db_size = 0; - i2400m_barker_db_used = 0; -} - - -/* - * Helper function to add all the known stable barkers to the barker - * database. - */ -static -int i2400m_barker_db_known_barkers(void) -{ - int result; - - result = i2400m_barker_db_add(I2400M_NBOOT_BARKER); - if (result < 0) - goto error_add; - result = i2400m_barker_db_add(I2400M_SBOOT_BARKER); - if (result < 0) - goto error_add; - result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050); - if (result < 0) - goto error_add; -error_add: - return result; -} - - -/* - * Initialize the barker database - * - * This can only be used from the module_init function for this - * module; this is to avoid the need to do locking. - * - * @options: command line argument with extra barkers to - * recognize. This is a comma-separated list of 32-bit hex - * numbers. They are appended to the existing list. Setting 0 - * cleans the existing list and starts a new one. - */ -int i2400m_barker_db_init(const char *_options) -{ - int result; - char *options = NULL, *options_orig, *token; - - i2400m_barker_db = NULL; - i2400m_barker_db_size = 0; - i2400m_barker_db_used = 0; - - result = i2400m_barker_db_known_barkers(); - if (result < 0) - goto error_add; - /* parse command line options from i2400m.barkers */ - if (_options != NULL) { - unsigned barker; - - options_orig = kstrdup(_options, GFP_KERNEL); - if (options_orig == NULL) { - result = -ENOMEM; - goto error_parse; - } - options = options_orig; - - while ((token = strsep(&options, ",")) != NULL) { - if (*token == '\0') /* eat joint commas */ - continue; - if (sscanf(token, "%x", &barker) != 1 - || barker > 0xffffffff) { - printk(KERN_ERR "%s: can't recognize " - "i2400m.barkers value '%s' as " - "a 32-bit number\n", - __func__, token); - result = -EINVAL; - goto error_parse; - } - if (barker == 0) { - /* clean list and start new */ - i2400m_barker_db_exit(); - continue; - } - result = i2400m_barker_db_add(barker); - if (result < 0) - goto error_parse_add; - } - kfree(options_orig); - } - return 0; - -error_parse_add: -error_parse: - kfree(options_orig); -error_add: - kfree(i2400m_barker_db); - return result; -} - - -/* - * Recognize a boot barker - * - * @buf: buffer where the boot barker. - * @buf_size: size of the buffer (has to be 16 bytes). It is passed - * here so the function can check it for the caller. - * - * Note that as a side effect, upon identifying the obtained boot - * barker, this function will set i2400m->barker to point to the right - * barker database entry. Subsequent calls to the function will result - * in verifying that the same type of boot barker is returned when the - * device [re]boots (as long as the same device instance is used). - * - * Return: 0 if @buf matches a known boot barker. 
-ENOENT if the - * buffer in @buf doesn't match any boot barker in the database or - * -EILSEQ if the buffer doesn't have the right size. - */ -int i2400m_is_boot_barker(struct i2400m *i2400m, - const void *buf, size_t buf_size) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_barker_db *barker; - int i; - - result = -ENOENT; - if (buf_size != sizeof(i2400m_barker_db[i].data)) - return result; - - /* Short circuit if we have already discovered the barker - * associated with the device. */ - if (i2400m->barker && - !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) - return 0; - - for (i = 0; i < i2400m_barker_db_used; i++) { - barker = &i2400m_barker_db[i]; - BUILD_BUG_ON(sizeof(barker->data) != 16); - if (memcmp(buf, barker->data, sizeof(barker->data))) - continue; - - if (i2400m->barker == NULL) { - i2400m->barker = barker; - d_printf(1, dev, "boot barker set to #%u/%08x\n", - i, le32_to_cpu(barker->data[0])); - if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER)) - i2400m->sboot = 0; - else - i2400m->sboot = 1; - } else if (i2400m->barker != barker) { - dev_err(dev, "HW inconsistency: device " - "reports a different boot barker " - "than set (from %08x to %08x)\n", - le32_to_cpu(i2400m->barker->data[0]), - le32_to_cpu(barker->data[0])); - result = -EIO; - } else - d_printf(2, dev, "boot barker confirmed #%u/%08x\n", - i, le32_to_cpu(barker->data[0])); - result = 0; - break; - } - return result; -} -EXPORT_SYMBOL_GPL(i2400m_is_boot_barker); - - -/* - * Verify the ack data received - * - * Given a reply to a boot mode command, chew it and verify everything - * is ok. - * - * @opcode: opcode which generated this ack. For error messages. - * @ack: pointer to ack data we received - * @ack_size: size of that data buffer - * @flags: I2400M_BM_CMD_* flags we called the command with. - * - * Way too long function -- maybe it should be further split - */ -static -ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode, - struct i2400m_bootrom_header *ack, - size_t ack_size, int flags) -{ - ssize_t result = -ENOMEM; - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(8, dev, "(i2400m %p opcode %d ack %p size %zu)\n", - i2400m, opcode, ack, ack_size); - if (ack_size < sizeof(*ack)) { - result = -EIO; - dev_err(dev, "boot-mode cmd %d: HW BUG? notification didn't " - "return enough data (%zu bytes vs %zu expected)\n", - opcode, ack_size, sizeof(*ack)); - goto error_ack_short; - } - result = i2400m_is_boot_barker(i2400m, ack, ack_size); - if (result >= 0) { - result = -ERESTARTSYS; - d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode); - goto error_reboot; - } - if (ack_size == sizeof(i2400m_ACK_BARKER) - && memcmp(ack, i2400m_ACK_BARKER, sizeof(*ack)) == 0) { - result = -EISCONN; - d_printf(3, dev, "boot-mode cmd %d: HW reboot ack barker\n", - opcode); - goto error_reboot_ack; - } - result = 0; - if (flags & I2400M_BM_CMD_RAW) - goto out_raw; - ack->data_size = le32_to_cpu(ack->data_size); - ack->target_addr = le32_to_cpu(ack->target_addr); - ack->block_checksum = le32_to_cpu(ack->block_checksum); - d_printf(5, dev, "boot-mode cmd %d: notification for opcode %u " - "response %u csum %u rr %u da %u\n", - opcode, i2400m_brh_get_opcode(ack), - i2400m_brh_get_response(ack), - i2400m_brh_get_use_checksum(ack), - i2400m_brh_get_response_required(ack), - i2400m_brh_get_direct_access(ack)); - result = -EIO; - if (i2400m_brh_get_signature(ack) != 0xcbbc) { - dev_err(dev, "boot-mode cmd %d: HW BUG? 
wrong signature " - "0x%04x\n", opcode, i2400m_brh_get_signature(ack)); - goto error_ack_signature; - } - if (opcode != -1 && opcode != i2400m_brh_get_opcode(ack)) { - dev_err(dev, "boot-mode cmd %d: HW BUG? " - "received response for opcode %u, expected %u\n", - opcode, i2400m_brh_get_opcode(ack), opcode); - goto error_ack_opcode; - } - if (i2400m_brh_get_response(ack) != 0) { /* failed? */ - dev_err(dev, "boot-mode cmd %d: error; hw response %u\n", - opcode, i2400m_brh_get_response(ack)); - goto error_ack_failed; - } - if (ack_size < ack->data_size + sizeof(*ack)) { - dev_err(dev, "boot-mode cmd %d: SW BUG " - "driver provided only %zu bytes for %zu bytes " - "of data\n", opcode, ack_size, - (size_t) le32_to_cpu(ack->data_size) + sizeof(*ack)); - goto error_ack_short_buffer; - } - result = ack_size; - /* Don't you love this stack of empty targets? Well, I don't - * either, but it helps track exactly who comes in here and - * why :) */ -error_ack_short_buffer: -error_ack_failed: -error_ack_opcode: -error_ack_signature: -out_raw: -error_reboot_ack: -error_reboot: -error_ack_short: - d_fnend(8, dev, "(i2400m %p opcode %d ack %p size %zu) = %d\n", - i2400m, opcode, ack, ack_size, (int) result); - return result; -} - - -/** - * i2400m_bm_cmd - Execute a boot mode command - * - * @cmd: buffer containing the command data (pointing at the header). - * This data can be ANYWHERE (for USB, we will copy it to an - * specific buffer). Make sure everything is in proper little - * endian. - * - * A raw buffer can be also sent, just cast it and set flags to - * I2400M_BM_CMD_RAW. - * - * This function will generate a checksum for you if the - * checksum bit in the command is set (unless I2400M_BM_CMD_RAW - * is set). - * - * You can use the i2400m->bm_cmd_buf to stage your commands and - * send them. - * - * If NULL, no command is sent (we just wait for an ack). - * - * @cmd_size: size of the command. Will be auto padded to the - * bus-specific drivers padding requirements. - * - * @ack: buffer where to place the acknowledgement. If it is a regular - * command response, all fields will be returned with the right, - * native endianess. - * - * You *cannot* use i2400m->bm_ack_buf for this buffer. - * - * @ack_size: size of @ack, 16 aligned; you need to provide at least - * sizeof(*ack) bytes and then enough to contain the return data - * from the command - * - * @flags: see I2400M_BM_CMD_* above. - * - * @returns: bytes received by the notification; if < 0, an errno code - * denoting an error or: - * - * -ERESTARTSYS The device has rebooted - * - * Executes a boot-mode command and waits for a response, doing basic - * validation on it; if a zero length response is received, it retries - * waiting for a response until a non-zero one is received (timing out - * after %I2400M_BOOT_RETRIES retries). - */ -static -ssize_t i2400m_bm_cmd(struct i2400m *i2400m, - const struct i2400m_bootrom_header *cmd, size_t cmd_size, - struct i2400m_bootrom_header *ack, size_t ack_size, - int flags) -{ - ssize_t result = -ENOMEM, rx_bytes; - struct device *dev = i2400m_dev(i2400m); - int opcode = cmd == NULL ? 
-1 : i2400m_brh_get_opcode(cmd); - - d_fnstart(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu)\n", - i2400m, cmd, cmd_size, ack, ack_size); - BUG_ON(ack_size < sizeof(*ack)); - BUG_ON(i2400m->boot_mode == 0); - - if (cmd != NULL) { /* send the command */ - result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags); - if (result < 0) - goto error_cmd_send; - if ((flags & I2400M_BM_CMD_RAW) == 0) - d_printf(5, dev, - "boot-mode cmd %d csum %u rr %u da %u: " - "addr 0x%04x size %u block csum 0x%04x\n", - opcode, i2400m_brh_get_use_checksum(cmd), - i2400m_brh_get_response_required(cmd), - i2400m_brh_get_direct_access(cmd), - cmd->target_addr, cmd->data_size, - cmd->block_checksum); - } - result = i2400m->bus_bm_wait_for_ack(i2400m, ack, ack_size); - if (result < 0) { - dev_err(dev, "boot-mode cmd %d: error waiting for an ack: %d\n", - opcode, (int) result); /* bah, %zd doesn't work */ - goto error_wait_for_ack; - } - rx_bytes = result; - /* verify the ack and read more if necessary [result is the - * final amount of bytes we get in the ack] */ - result = __i2400m_bm_ack_verify(i2400m, opcode, ack, ack_size, flags); - if (result < 0) - goto error_bad_ack; - /* Don't you love this stack of empty targets? Well, I don't - * either, but it helps track exactly who comes in here and - * why :) */ - result = rx_bytes; -error_bad_ack: -error_wait_for_ack: -error_cmd_send: - d_fnend(6, dev, "(i2400m %p cmd %p size %zu ack %p size %zu) = %d\n", - i2400m, cmd, cmd_size, ack, ack_size, (int) result); - return result; -} - - -/** - * i2400m_download_chunk - write a single chunk of data to the device's memory - * - * @i2400m: device descriptor - * @buf: the buffer to write - * @buf_len: length of the buffer to write - * @addr: address in the device memory space - * @direct: bootrom write mode - * @do_csum: should a checksum validation be performed - */ -static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, - size_t __chunk_len, unsigned long addr, - unsigned int direct, unsigned int do_csum) -{ - int ret; - size_t chunk_len = ALIGN(__chunk_len, I2400M_PL_ALIGN); - struct device *dev = i2400m_dev(i2400m); - struct { - struct i2400m_bootrom_header cmd; - u8 cmd_payload[]; - } __packed *buf; - struct i2400m_bootrom_header ack; - - d_fnstart(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " - "direct %u do_csum %u)\n", i2400m, chunk, __chunk_len, - addr, direct, do_csum); - buf = i2400m->bm_cmd_buf; - memcpy(buf->cmd_payload, chunk, __chunk_len); - memset(buf->cmd_payload + __chunk_len, 0xad, chunk_len - __chunk_len); - - buf->cmd.command = i2400m_brh_command(I2400M_BRH_WRITE, - __chunk_len & 0x3 ? 0 : do_csum, - __chunk_len & 0xf ? 0 : direct); - buf->cmd.target_addr = cpu_to_le32(addr); - buf->cmd.data_size = cpu_to_le32(__chunk_len); - ret = i2400m_bm_cmd(i2400m, &buf->cmd, sizeof(buf->cmd) + chunk_len, - &ack, sizeof(ack), 0); - if (ret >= 0) - ret = 0; - d_fnend(5, dev, "(i2400m %p chunk %p __chunk_len %zu addr 0x%08lx " - "direct %u do_csum %u) = %d\n", i2400m, chunk, __chunk_len, - addr, direct, do_csum, ret); - return ret; -} - - -/* - * Download a BCF file's sections to the device - * - * @i2400m: device descriptor - * @bcf: pointer to firmware data (first header followed by the - * payloads). Assumed verified and consistent. - * @bcf_len: length (in bytes) of the @bcf buffer. - * - * Returns: < 0 errno code on error or the offset to the jump instruction. - * - * Given a BCF file, downloads each section (a command and a payload) - * to the device's address space. 
Actually, it just executes each - * command in the BCF file. - * - * The section size has to be aligned to 4 bytes AND the padding has - * to be taken from the firmware file, as the signature takes it into - * account. - */ -static -ssize_t i2400m_dnload_bcf(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf, size_t bcf_len) -{ - ssize_t ret; - struct device *dev = i2400m_dev(i2400m); - size_t offset, /* iterator offset */ - data_size, /* Size of the data payload */ - section_size, /* Size of the whole section (cmd + payload) */ - section = 1; - const struct i2400m_bootrom_header *bh; - struct i2400m_bootrom_header ack; - - d_fnstart(3, dev, "(i2400m %p bcf %p bcf_len %zu)\n", - i2400m, bcf, bcf_len); - /* Iterate over the command blocks in the BCF file that start - * after the header */ - offset = le32_to_cpu(bcf->header_len) * sizeof(u32); - while (1) { /* start sending the file */ - bh = (void *) bcf + offset; - data_size = le32_to_cpu(bh->data_size); - section_size = ALIGN(sizeof(*bh) + data_size, 4); - d_printf(7, dev, - "downloading section #%zu (@%zu %zu B) to 0x%08x\n", - section, offset, sizeof(*bh) + data_size, - le32_to_cpu(bh->target_addr)); - /* - * We look for the JUMP cmd in the bootmode header, - * either I2400M_BRH_SIGNED_JUMP for secure boot - * or I2400M_BRH_JUMP for unsecure boot; the last chunk - * should be the bootmode header with the JUMP cmd. - */ - if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP || - i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) { - d_printf(5, dev, "jump found @%zu\n", offset); - break; - } - if (offset + section_size > bcf_len) { - dev_err(dev, "fw %s: bad section #%zu, " - "end (@%zu) beyond EOF (@%zu)\n", - i2400m->fw_name, section, - offset + section_size, bcf_len); - ret = -EINVAL; - goto error_section_beyond_eof; - } - __i2400m_msleep(20); - ret = i2400m_bm_cmd(i2400m, bh, section_size, - &ack, sizeof(ack), I2400M_BM_CMD_RAW); - if (ret < 0) { - dev_err(dev, "fw %s: section #%zu (@%zu %zu B) " - "failed %d\n", i2400m->fw_name, section, - offset, sizeof(*bh) + data_size, (int) ret); - goto error_send; - } - offset += section_size; - section++; - } - ret = offset; -error_section_beyond_eof: -error_send: - d_fnend(3, dev, "(i2400m %p bcf %p bcf_len %zu) = %d\n", - i2400m, bcf, bcf_len, (int) ret); - return ret; -} - - -/* - * Indicate if the device emitted a reboot barker that indicates - * "signed boot" - */ -static -unsigned i2400m_boot_is_signed(struct i2400m *i2400m) -{ - return likely(i2400m->sboot); -} - - -/* - * Do the final steps of uploading firmware - * - * @bcf_hdr: BCF header we are actually using - * @bcf: pointer to the firmware image (which matches the first header - * that is followed by the actual payloads). - * @offset: [byte] offset into @bcf for the command we need to send. - * - * Depending on the boot mode (signed vs non-signed), different - * actions need to be taken.
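For the signed path of the finalization routine that follows, the signature block sits after the header, the key and the exponent, all sized in 32-bit words. A sketch of that offset arithmetic over a reduced, hypothetical header (the real i2400m_bcf_hdr has many more fields, so its sizeof() differs):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_bcf_hdr {		/* reduced, illustrative header */
	__le32 key_size;	/* all sizes are in 32-bit words */
	__le32 exponent_size;
	__le32 modulus_size;
};

/* Byte offset of the signature block: header, then key, then exponent */
static size_t demo_sig_offset(const struct demo_bcf_hdr *hdr)
{
	return sizeof(*hdr)
	       + le32_to_cpu(hdr->key_size) * sizeof(u32)
	       + le32_to_cpu(hdr->exponent_size) * sizeof(u32);
}

/* The signature block itself is the modulus */
static size_t demo_sig_size(const struct demo_bcf_hdr *hdr)
{
	return le32_to_cpu(hdr->modulus_size) * sizeof(u32);
}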
- */ -static -int i2400m_dnload_finalize(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf_hdr, - const struct i2400m_bcf_hdr *bcf, size_t offset) -{ - int ret = 0; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_bootrom_header *cmd, ack; - struct { - struct i2400m_bootrom_header cmd; - u8 cmd_pl[0]; - } __packed *cmd_buf; - size_t signature_block_offset, signature_block_size; - - d_fnstart(3, dev, "offset %zu\n", offset); - cmd = (void *) bcf + offset; - if (i2400m_boot_is_signed(i2400m) == 0) { - struct i2400m_bootrom_header jump_ack; - d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n", - le32_to_cpu(cmd->target_addr)); - cmd_buf = i2400m->bm_cmd_buf; - memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd)); - cmd = &cmd_buf->cmd; - /* now cmd points to the actual bootrom_header in cmd_buf */ - i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP); - cmd->data_size = 0; - ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), - &jump_ack, sizeof(jump_ack), 0); - } else { - d_printf(1, dev, "secure boot, jumping to 0x%08x\n", - le32_to_cpu(cmd->target_addr)); - cmd_buf = i2400m->bm_cmd_buf; - memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd)); - signature_block_offset = - sizeof(*bcf_hdr) - + le32_to_cpu(bcf_hdr->key_size) * sizeof(u32) - + le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32); - signature_block_size = - le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32); - memcpy(cmd_buf->cmd_pl, - (void *) bcf_hdr + signature_block_offset, - signature_block_size); - ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, - sizeof(cmd_buf->cmd) + signature_block_size, - &ack, sizeof(ack), I2400M_BM_CMD_RAW); - } - d_fnend(3, dev, "returning %d\n", ret); - return ret; -} - - -/** - * i2400m_bootrom_init - Reboots a powered device into boot mode - * - * @i2400m: device descriptor - * @flags: - * I2400M_BRI_SOFT: a reboot barker has been seen - * already, so don't wait for it. - * - * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait - * for a reboot barker notification. This is a one shot; if - * the state machine needs to send a reboot command it will. - * - * Returns: - * - * < 0 errno code on error, 0 if ok. - * - * Description: - * - * Tries hard enough to put the device in boot-mode. There are two - * main phases to this: - * - * a. (1) send a reboot command and (2) get a reboot barker - * - * b. (1) echo/ack the reboot sending the reboot barker back and (2) - * getting an ack barker in return - * - * We want to skip (a) in some cases [soft]. The state machine is - * horrible, but it is basically: on each phase, send what has to be - * sent (if any), wait for the answer and act on the answer. We might - * have to backtrack and retry, so we keep a max tries counter for - * that. - * - * It sucks because we don't know ahead of time which is going to be - * the reboot barker (the device might send different ones depending - * on its EEPROM config) and once the device reboots and waits for the - * echo/ack reboot barker being sent back, it doesn't understand - * anything else. So we can be left at the point where we don't know - * what to send to it -- cold reset and bus reset seem to have little - * effect. So the function iterates (in this case) through all the - * known barkers and tries them all until an ACK is - * received. Otherwise, it gives up. - * - * If we get a timeout after sending a warm reset, we do it again. 
- */ -int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_bootrom_header *cmd; - struct i2400m_bootrom_header ack; - int count = i2400m->bus_bm_retries; - int ack_timeout_cnt = 1; - unsigned i; - - BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data)); - BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER)); - - d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags); - result = -ENOMEM; - cmd = i2400m->bm_cmd_buf; - if (flags & I2400M_BRI_SOFT) - goto do_reboot_ack; -do_reboot: - ack_timeout_cnt = 1; - if (--count < 0) - goto error_timeout; - d_printf(4, dev, "device reboot: reboot command [%d # left]\n", - count); - if ((flags & I2400M_BRI_NO_REBOOT) == 0) - i2400m_reset(i2400m, I2400M_RT_WARM); - result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack), - I2400M_BM_CMD_RAW); - flags &= ~I2400M_BRI_NO_REBOOT; - switch (result) { - case -ERESTARTSYS: - /* - * at this point, i2400m_bm_cmd(), through - * __i2400m_bm_ack_process(), has updated - * i2400m->barker and we are good to go. - */ - d_printf(4, dev, "device reboot: got reboot barker\n"); - break; - case -EISCONN: /* we don't know how it got here...but we follow it */ - d_printf(4, dev, "device reboot: got ack barker - whatever\n"); - goto do_reboot; - case -ETIMEDOUT: - /* - * Device has timed out, we might be in boot mode - * already and expecting an ack; if we don't know what - * the barker is, we just send them all. Cold reset - * and bus reset don't work. Beats me. - */ - if (i2400m->barker != NULL) { - dev_err(dev, "device boot: reboot barker timed out, " - "trying (set) %08x echo/ack\n", - le32_to_cpu(i2400m->barker->data[0])); - goto do_reboot_ack; - } - for (i = 0; i < i2400m_barker_db_used; i++) { - struct i2400m_barker_db *barker = &i2400m_barker_db[i]; - memcpy(cmd, barker->data, sizeof(barker->data)); - result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), - &ack, sizeof(ack), - I2400M_BM_CMD_RAW); - if (result == -EISCONN) { - dev_warn(dev, "device boot: got ack barker " - "after sending echo/ack barker " - "#%d/%08x; rebooting j.i.c.\n", - i, le32_to_cpu(barker->data[0])); - flags &= ~I2400M_BRI_NO_REBOOT; - goto do_reboot; - } - } - dev_err(dev, "device boot: tried all the echo/acks, could " - "not get device to respond; giving up"); - result = -ESHUTDOWN; - case -EPROTO: - case -ESHUTDOWN: /* dev is gone */ - case -EINTR: /* user cancelled */ - goto error_dev_gone; - default: - dev_err(dev, "device reboot: error %d while waiting " - "for reboot barker - rebooting\n", result); - d_dump(1, dev, &ack, result); - goto do_reboot; - } - /* At this point we ack back with 4 REBOOT barkers and expect - * 4 ACK barkers. This is ugly, as we send a raw command -- - * hence the cast. _bm_cmd() will catch the reboot ack - * notification and report it as -EISCONN. */ -do_reboot_ack: - d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count); - memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data)); - result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), - &ack, sizeof(ack), I2400M_BM_CMD_RAW); - switch (result) { - case -ERESTARTSYS: - d_printf(4, dev, "reboot ack: got reboot barker - retrying\n"); - if (--count < 0) - goto error_timeout; - goto do_reboot_ack; - case -EISCONN: - d_printf(4, dev, "reboot ack: got ack barker - good\n"); - break; - case -ETIMEDOUT: /* no response, maybe it is the other type? 
*/ - if (ack_timeout_cnt-- < 0) { - d_printf(4, dev, "reboot ack timedout: retrying\n"); - goto do_reboot_ack; - } else { - dev_err(dev, "reboot ack timedout too long: " - "trying reboot\n"); - goto do_reboot; - } - break; - case -EPROTO: - case -ESHUTDOWN: /* dev is gone */ - goto error_dev_gone; - default: - dev_err(dev, "device reboot ack: error %d while waiting for " - "reboot ack barker - rebooting\n", result); - goto do_reboot; - } - d_printf(2, dev, "device reboot ack: got ack barker - boot done\n"); - result = 0; -exit_timeout: -error_dev_gone: - d_fnend(4, dev, "(i2400m %p flags 0x%08x) = %d\n", - i2400m, flags, result); - return result; - -error_timeout: - dev_err(dev, "Timed out waiting for reboot ack\n"); - result = -ETIMEDOUT; - goto exit_timeout; -} - - -/* - * Read the MAC addr - * - * The position this function reads is fixed in device memory and - * always available, even without firmware. - * - * Note we specify we want to read only six bytes, but provide space - * for 16, as we always get it rounded up. - */ -int i2400m_read_mac_addr(struct i2400m *i2400m) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - struct i2400m_bootrom_header *cmd; - struct { - struct i2400m_bootrom_header ack; - u8 ack_pl[16]; - } __packed ack_buf; - - d_fnstart(5, dev, "(i2400m %p)\n", i2400m); - cmd = i2400m->bm_cmd_buf; - cmd->command = i2400m_brh_command(I2400M_BRH_READ, 0, 1); - cmd->target_addr = cpu_to_le32(0x00203fe8); - cmd->data_size = cpu_to_le32(6); - result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), - &ack_buf.ack, sizeof(ack_buf), 0); - if (result < 0) { - dev_err(dev, "BM: read mac addr failed: %d\n", result); - goto error_read_mac; - } - d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl); - if (i2400m->bus_bm_mac_addr_impaired == 1) { - ack_buf.ack_pl[0] = 0x00; - ack_buf.ack_pl[1] = 0x16; - ack_buf.ack_pl[2] = 0xd3; - get_random_bytes(&ack_buf.ack_pl[3], 3); - dev_err(dev, "BM is MAC addr impaired, faking MAC addr to " - "mac addr is %pM\n", ack_buf.ack_pl); - result = 0; - } - net_dev->addr_len = ETH_ALEN; - memcpy(net_dev->dev_addr, ack_buf.ack_pl, ETH_ALEN); -error_read_mac: - d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; -} - - -/* - * Initialize a non signed boot - * - * This implies sending some magic values to the device's memory. Note - * we convert the values to little endian in the same array - * declaration. - */ -static -int i2400m_dnload_init_nonsigned(struct i2400m *i2400m) -{ - unsigned i = 0; - int ret = 0; - struct device *dev = i2400m_dev(i2400m); - d_fnstart(5, dev, "(i2400m %p)\n", i2400m); - if (i2400m->bus_bm_pokes_table) { - while (i2400m->bus_bm_pokes_table[i].address) { - ret = i2400m_download_chunk( - i2400m, - &i2400m->bus_bm_pokes_table[i].data, - sizeof(i2400m->bus_bm_pokes_table[i].data), - i2400m->bus_bm_pokes_table[i].address, 1, 1); - if (ret < 0) - break; - i++; - } - } - d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret); - return ret; -} - - -/* - * Initialize the signed boot process - * - * @i2400m: device descriptor - * - * @bcf_hdr: pointer to the firmware header; assumes it is fully in - * memory (it has gone through basic validation). - * - * Returns: 0 if ok, < 0 errno code on error, -ERESTARTSYS if the hw - * rebooted. - * - * This writes the firmware BCF header to the device using the - * HASH_PAYLOAD_ONLY command. 
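The non-signed initialization earlier in this block walks a zero-terminated table of {address, data} pokes; a condensed sketch of that shape, with hypothetical addresses, values and write callback:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_poke {
	unsigned long address;	/* 0 terminates the table */
	__le32 data;
};

static const struct demo_poke demo_pokes[] = {
	{ 0x10000a00, cpu_to_le32(0x00000001) },	/* hypothetical */
	{ 0x10000a04, cpu_to_le32(0x00000002) },	/* hypothetical */
	{ 0, 0 }					/* sentinel */
};

/* Write each poke until the sentinel or the first error */
static int demo_run_pokes(int (*write_word)(unsigned long addr, __le32 data))
{
	const struct demo_poke *p;
	int ret = 0;

	for (p = demo_pokes; p->address && ret >= 0; p++)
		ret = write_word(p->address, p->data);
	return ret;
}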
- */ -static -int i2400m_dnload_init_signed(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf_hdr) -{ - int ret; - struct device *dev = i2400m_dev(i2400m); - struct { - struct i2400m_bootrom_header cmd; - struct i2400m_bcf_hdr cmd_pl; - } __packed *cmd_buf; - struct i2400m_bootrom_header ack; - - d_fnstart(5, dev, "(i2400m %p bcf_hdr %p)\n", i2400m, bcf_hdr); - cmd_buf = i2400m->bm_cmd_buf; - cmd_buf->cmd.command = - i2400m_brh_command(I2400M_BRH_HASH_PAYLOAD_ONLY, 0, 0); - cmd_buf->cmd.target_addr = 0; - cmd_buf->cmd.data_size = cpu_to_le32(sizeof(cmd_buf->cmd_pl)); - memcpy(&cmd_buf->cmd_pl, bcf_hdr, sizeof(*bcf_hdr)); - ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, sizeof(*cmd_buf), - &ack, sizeof(ack), 0); - if (ret >= 0) - ret = 0; - d_fnend(5, dev, "(i2400m %p bcf_hdr %p) = %d\n", i2400m, bcf_hdr, ret); - return ret; -} - - -/* - * Initialize the firmware download at the device side - * - * Multiplex to the one that matters based on the device's mode - * (signed or non-signed). - */ -static -int i2400m_dnload_init(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf_hdr) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - - if (i2400m_boot_is_signed(i2400m)) { - d_printf(1, dev, "signed boot\n"); - result = i2400m_dnload_init_signed(i2400m, bcf_hdr); - if (result == -ERESTARTSYS) - return result; - if (result < 0) - dev_err(dev, "firmware %s: signed boot download " - "initialization failed: %d\n", - i2400m->fw_name, result); - } else { - /* non-signed boot process without pokes */ - d_printf(1, dev, "non-signed boot\n"); - result = i2400m_dnload_init_nonsigned(i2400m); - if (result == -ERESTARTSYS) - return result; - if (result < 0) - dev_err(dev, "firmware %s: non-signed download " - "initialization failed: %d\n", - i2400m->fw_name, result); - } - return result; -} - - -/* - * Run consistency tests on the firmware file and load up headers - * - * Check for the firmware being made for the i2400m device, - * etc. These checks are mostly informative, as the device will make - * them too; but the driver's response is more informative on what - * went wrong. - * - * This will also look at all the headers present on the firmware - * file, and update i2400m->fw_hdrs to point to them. - */ -static -int i2400m_fw_hdr_check(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf_hdr, - size_t index, size_t offset) -{ - struct device *dev = i2400m_dev(i2400m); - - unsigned module_type, header_len, major_version, minor_version, - module_id, module_vendor, date, size; - - module_type = le32_to_cpu(bcf_hdr->module_type); - header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len); - major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000) - >> 16; - minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff; - module_id = le32_to_cpu(bcf_hdr->module_id); - module_vendor = le32_to_cpu(bcf_hdr->module_vendor); - date = le32_to_cpu(bcf_hdr->date); - size = sizeof(u32) * le32_to_cpu(bcf_hdr->size); - - d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header " - "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n", - i2400m->fw_name, index, offset, - module_type, module_vendor, module_id, - major_version, minor_version, header_len, size, date); - - /* Hard errors */ - if (major_version != 1) { - dev_err(dev, "firmware %s #%zd@%08zx: major header version " - "v%u.%u not supported\n", - i2400m->fw_name, index, offset, - major_version, minor_version); - return -EBADF; - } - - if (module_type != 6) { /* built for the right hardware?
*/ - dev_err(dev, "firmware %s #%zd@%08zx: unexpected module " - "type 0x%x; aborting\n", - i2400m->fw_name, index, offset, - module_type); - return -EBADF; - } - - if (module_vendor != 0x8086) { - dev_err(dev, "firmware %s #%zd@%08zx: unexpected module " - "vendor 0x%x; aborting\n", - i2400m->fw_name, index, offset, module_vendor); - return -EBADF; - } - - if (date < 0x20080300) - dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x " - "too old; unsupported\n", - i2400m->fw_name, index, offset, date); - return 0; -} - - -/* - * Run consistency tests on the firmware file and load up headers - * - * Check for the firmware being made for the i2400m device, - * etc...These checks are mostly informative, as the device will make - * them too; but the driver's response is more informative on what - * went wrong. - * - * This will also look at all the headers present on the firmware - * file, and update i2400m->fw_hdrs to point to them. - */ -static -int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - size_t headers = 0; - const struct i2400m_bcf_hdr *bcf_hdr; - const void *itr, *next, *top; - size_t slots = 0, used_slots = 0; - - for (itr = bcf, top = itr + bcf_size; - itr < top; - headers++, itr = next) { - size_t leftover, offset, header_len, size; - - leftover = top - itr; - offset = itr - bcf; - if (leftover <= sizeof(*bcf_hdr)) { - dev_err(dev, "firmware %s: %zu B left at @%zx, " - "not enough for BCF header\n", - i2400m->fw_name, leftover, offset); - break; - } - bcf_hdr = itr; - /* Only the first header is supposed to be followed by - * payload */ - header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len); - size = sizeof(u32) * le32_to_cpu(bcf_hdr->size); - if (headers == 0) - next = itr + size; - else - next = itr + header_len; - - result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset); - if (result < 0) - continue; - if (used_slots + 1 >= slots) { - /* +1 -> we need to account for the one we'll - * occupy and at least an extra one for - * always being NULL */ - result = i2400m_zrealloc_2x( - (void **) &i2400m->fw_hdrs, &slots, - sizeof(i2400m->fw_hdrs[0]), - GFP_KERNEL); - if (result < 0) - goto error_zrealloc; - } - i2400m->fw_hdrs[used_slots] = bcf_hdr; - used_slots++; - } - if (headers == 0) { - dev_err(dev, "firmware %s: no usable headers found\n", - i2400m->fw_name); - result = -EBADF; - } else - result = 0; -error_zrealloc: - return result; -} - - -/* - * Match a barker to a BCF header module ID - * - * The device sends a barker which tells the firmware loader which - * header in the BCF file has to be used. This does the matching. 
- */ -static -unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m, - const struct i2400m_bcf_hdr *bcf_hdr) -{ - u32 barker = le32_to_cpu(i2400m->barker->data[0]) - & 0x7fffffff; - u32 module_id = le32_to_cpu(bcf_hdr->module_id) - & 0x7fffffff; /* high bit used for something else */ - - /* special case for 5x50 */ - if (barker == I2400M_SBOOT_BARKER && module_id == 0) - return 1; - if (module_id == barker) - return 1; - return 0; -} - -static -const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_bcf_hdr **bcf_itr, *bcf_hdr; - unsigned i = 0; - u32 barker = le32_to_cpu(i2400m->barker->data[0]); - - d_printf(2, dev, "finding BCF header for barker %08x\n", barker); - if (barker == I2400M_NBOOT_BARKER) { - bcf_hdr = i2400m->fw_hdrs[0]; - d_printf(1, dev, "using BCF header #%u/%08x for non-signed " - "barker\n", 0, le32_to_cpu(bcf_hdr->module_id)); - return bcf_hdr; - } - for (bcf_itr = i2400m->fw_hdrs; *bcf_itr != NULL; bcf_itr++, i++) { - bcf_hdr = *bcf_itr; - if (i2400m_bcf_hdr_match(i2400m, bcf_hdr)) { - d_printf(1, dev, "hit on BCF hdr #%u/%08x\n", - i, le32_to_cpu(bcf_hdr->module_id)); - return bcf_hdr; - } else - d_printf(1, dev, "miss on BCF hdr #%u/%08x\n", - i, le32_to_cpu(bcf_hdr->module_id)); - } - dev_err(dev, "cannot find a matching BCF header for barker %08x\n", - barker); - return NULL; -} - - -/* - * Download the firmware to the device - * - * @i2400m: device descriptor - * @bcf: pointer to loaded (and minimally verified for consistency) - * firmware - * @bcf_size: size of the @bcf buffer (header plus payloads) - * - * The process for doing this is described in this file's header. - * - * Note we only reinitialize boot-mode if the flags say so. Some hw - * iterations need it, some don't. In any case, if we loop, we always - * need to reinitialize the boot room, hence the flags modification. - */ -static -int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf, - size_t fw_size, enum i2400m_bri flags) -{ - int ret = 0; - struct device *dev = i2400m_dev(i2400m); - int count = i2400m->bus_bm_retries; - const struct i2400m_bcf_hdr *bcf_hdr; - size_t bcf_size; - - d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n", - i2400m, bcf, fw_size); - i2400m->boot_mode = 1; - wmb(); /* Make sure other readers see it */ -hw_reboot: - if (count-- == 0) { - ret = -ERESTARTSYS; - dev_err(dev, "device rebooted too many times, aborting\n"); - goto error_too_many_reboots; - } - if (flags & I2400M_BRI_MAC_REINIT) { - ret = i2400m_bootrom_init(i2400m, flags); - if (ret < 0) { - dev_err(dev, "bootrom init failed: %d\n", ret); - goto error_bootrom_init; - } - } - flags |= I2400M_BRI_MAC_REINIT; - - /* - * Initialize the download, push the bytes to the device and - * then jump to the new firmware. Note @ret is passed with the - * offset of the jump instruction to _dnload_finalize() - * - * Note we need to use the BCF header in the firmware image - * that matches the barker that the device sent when it - * rebooted, so it has to be passed along. - */ - ret = -EBADF; - bcf_hdr = i2400m_bcf_hdr_find(i2400m); - if (bcf_hdr == NULL) - goto error_bcf_hdr_find; - - ret = i2400m_dnload_init(i2400m, bcf_hdr); - if (ret == -ERESTARTSYS) - goto error_dev_rebooted; - if (ret < 0) - goto error_dnload_init; - - /* - * bcf_size refers to one header size plus the fw sections size - * indicated by the header,ie. 
if there are other extended headers - * at the tail, they are not counted - */ - bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size); - ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size); - if (ret == -ERESTARTSYS) - goto error_dev_rebooted; - if (ret < 0) { - dev_err(dev, "fw %s: download failed: %d\n", - i2400m->fw_name, ret); - goto error_dnload_bcf; - } - - ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret); - if (ret == -ERESTARTSYS) - goto error_dev_rebooted; - if (ret < 0) { - dev_err(dev, "fw %s: " - "download finalization failed: %d\n", - i2400m->fw_name, ret); - goto error_dnload_finalize; - } - - d_printf(2, dev, "fw %s successfully uploaded\n", - i2400m->fw_name); - i2400m->boot_mode = 0; - wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */ -error_dnload_finalize: -error_dnload_bcf: -error_dnload_init: -error_bcf_hdr_find: -error_bootrom_init: -error_too_many_reboots: - d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n", - i2400m, bcf, fw_size, ret); - return ret; - -error_dev_rebooted: - dev_err(dev, "device rebooted, %d tries left\n", count); - /* we got the notification already, no need to wait for it again */ - flags |= I2400M_BRI_SOFT; - goto hw_reboot; -} - -static -int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw, - enum i2400m_bri flags) -{ - int ret; - struct device *dev = i2400m_dev(i2400m); - const struct i2400m_bcf_hdr *bcf; /* Firmware data */ - - d_fnstart(5, dev, "(i2400m %p)\n", i2400m); - bcf = (void *) fw->data; - ret = i2400m_fw_check(i2400m, bcf, fw->size); - if (ret >= 0) - ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags); - if (ret < 0) - dev_err(dev, "%s: cannot use: %d, skipping\n", - i2400m->fw_name, ret); - kfree(i2400m->fw_hdrs); - i2400m->fw_hdrs = NULL; - d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret); - return ret; -} - - -/* Refcounted container for firmware data */ -struct i2400m_fw { - struct kref kref; - const struct firmware *fw; -}; - - -static -void i2400m_fw_destroy(struct kref *kref) -{ - struct i2400m_fw *i2400m_fw = - container_of(kref, struct i2400m_fw, kref); - release_firmware(i2400m_fw->fw); - kfree(i2400m_fw); -} - - -static -struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw) -{ - if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) - kref_get(&i2400m_fw->kref); - return i2400m_fw; -} - - -static -void i2400m_fw_put(struct i2400m_fw *i2400m_fw) -{ - kref_put(&i2400m_fw->kref, i2400m_fw_destroy); -} - - -/** - * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware - * - * @i2400m: device descriptor - * - * Returns: >= 0 if ok, < 0 errno code on error. - * - * This sets up the firmware upload environment, loads the firmware - * file from disk, verifies and then calls the firmware upload process - * per se. - * - * Can be called either from probe, or after a warm reset. Cannot be - * called from within an interrupt. All the flow in this code is - * single-threaded; all I/Os are synchronous.
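The firmware cache handled below relies on the refcounted container shown above (struct i2400m_fw wrapping a kref); a self-contained sketch of that lifecycle with illustrative names, the blob field standing in for the struct firmware pointer:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_fw {
	struct kref kref;
	const void *blob;	/* stand-in for the struct firmware pointer */
};

/* Called by the final kref_put(); this is the only place that frees */
static void demo_fw_destroy(struct kref *kref)
{
	struct demo_fw *dfw = container_of(kref, struct demo_fw, kref);

	/* release_firmware() would go here in the real driver */
	kfree(dfw);
}

static struct demo_fw *demo_fw_alloc(void)
{
	struct demo_fw *dfw = kzalloc(sizeof(*dfw), GFP_KERNEL);

	if (dfw)
		kref_init(&dfw->kref);	/* reference count starts at 1 */
	return dfw;
}

/* Each additional user pairs kref_get() with a later kref_put():
 *	kref_get(&dfw->kref);
 *	...
 *	kref_put(&dfw->kref, demo_fw_destroy);
 */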
- */ -int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) -{ - int ret, itr; - struct device *dev = i2400m_dev(i2400m); - struct i2400m_fw *i2400m_fw; - const struct firmware *fw; - const char *fw_name; - - d_fnstart(5, dev, "(i2400m %p)\n", i2400m); - - ret = -ENODEV; - spin_lock(&i2400m->rx_lock); - i2400m_fw = i2400m_fw_get(i2400m->fw_cached); - spin_unlock(&i2400m->rx_lock); - if (i2400m_fw == (void *) ~0) { - dev_err(dev, "can't load firmware now!"); - goto out; - } else if (i2400m_fw != NULL) { - dev_info(dev, "firmware %s: loading from cache\n", - i2400m->fw_name); - ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags); - i2400m_fw_put(i2400m_fw); - goto out; - } - - /* Load firmware files to memory. */ - for (itr = 0, ret = -ENOENT; ; itr++) { - fw_name = i2400m->bus_fw_names[itr]; - if (fw_name == NULL) { - dev_err(dev, "Could not find a usable firmware image\n"); - break; - } - d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr); - ret = request_firmware(&fw, fw_name, dev); - if (ret < 0) { - dev_err(dev, "fw %s: cannot load file: %d\n", - fw_name, ret); - continue; - } - i2400m->fw_name = fw_name; - ret = i2400m_fw_bootstrap(i2400m, fw, flags); - release_firmware(fw); - if (ret >= 0) /* firmware loaded successfully */ - break; - i2400m->fw_name = NULL; - } -out: - d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret); - return ret; -} -EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap); - - -void i2400m_fw_cache(struct i2400m *i2400m) -{ - int result; - struct i2400m_fw *i2400m_fw; - struct device *dev = i2400m_dev(i2400m); - - /* if there is anything there, free it -- now, this'd be weird */ - spin_lock(&i2400m->rx_lock); - i2400m_fw = i2400m->fw_cached; - spin_unlock(&i2400m->rx_lock); - if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) { - i2400m_fw_put(i2400m_fw); - WARN(1, "%s:%u: still cached fw still present?\n", - __func__, __LINE__); - } - - if (i2400m->fw_name == NULL) { - dev_err(dev, "firmware n/a: can't cache\n"); - i2400m_fw = (void *) ~0; - goto out; - } - - i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC); - if (i2400m_fw == NULL) - goto out; - kref_init(&i2400m_fw->kref); - result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev); - if (result < 0) { - dev_err(dev, "firmware %s: failed to cache: %d\n", - i2400m->fw_name, result); - kfree(i2400m_fw); - i2400m_fw = (void *) ~0; - } else - dev_info(dev, "firmware %s: cached\n", i2400m->fw_name); -out: - spin_lock(&i2400m->rx_lock); - i2400m->fw_cached = i2400m_fw; - spin_unlock(&i2400m->rx_lock); -} - - -void i2400m_fw_uncache(struct i2400m *i2400m) -{ - struct i2400m_fw *i2400m_fw; - - spin_lock(&i2400m->rx_lock); - i2400m_fw = i2400m->fw_cached; - i2400m->fw_cached = NULL; - spin_unlock(&i2400m->rx_lock); - - if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) - i2400m_fw_put(i2400m_fw); -} - diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h deleted file mode 100644 index eff4f464a23e..000000000000 --- a/drivers/net/wimax/i2400m/i2400m-usb.h +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * USB-specific i2400m driver definitions - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - Initial implementation - * - * - * This driver implements the bus-specific part of the i2400m for - * USB. Check i2400m.h for a generic driver description. - * - * ARCHITECTURE - * - * This driver listens to notifications sent from the notification - * endpoint (in usb-notif.c); when data is ready to read, the code in - * there schedules a read from the device (usb-rx.c) and then passes - * the data to the generic RX code (rx.c). - * - * When the generic driver needs to send data (network or control), it - * queues up in the TX FIFO (tx.c) and that will notify the driver - * through the i2400m->bus_tx_kick() callback - * (usb-tx.c:i2400mu_bus_tx_kick) which will send the items in the - * FIFO queue. - * - * This driver, as well, implements the USB-specific ops for the generic - * driver to be able to setup/teardown communication with the device - * [i2400m_bus_dev_start() and i2400m_bus_dev_stop()], reseting the - * device [i2400m_bus_reset()] and performing firmware upload - * [i2400m_bus_bm_cmd() and i2400_bus_bm_wait_for_ack()]. - */ - -#ifndef __I2400M_USB_H__ -#define __I2400M_USB_H__ - -#include "i2400m.h" -#include <linux/kthread.h> - - -/* - * Error Density Count: cheapo error density (over time) counter - * - * Originally by Reinette Chatre <reinette.chatre@intel.com> - * - * Embed an 'struct edc' somewhere. Each time there is a soft or - * retryable error, call edc_inc() and check if the error top - * watermark has been reached. - */ -enum { - EDC_MAX_ERRORS = 10, - EDC_ERROR_TIMEFRAME = HZ, -}; - -/* error density counter */ -struct edc { - unsigned long timestart; - u16 errorcount; -}; - -struct i2400m_endpoint_cfg { - unsigned char bulk_out; - unsigned char notification; - unsigned char reset_cold; - unsigned char bulk_in; -}; - -static inline void edc_init(struct edc *edc) -{ - edc->timestart = jiffies; -} - -/** - * edc_inc - report a soft error and check if we are over the watermark - * - * @edc: pointer to error density counter. - * @max_err: maximum number of errors we can accept over the timeframe - * @timeframe: length of the timeframe (in jiffies). 
- *
- * Returns: !0 if the maximum number of acceptable errors per timeframe
- *     has been exceeded. 0 otherwise.
- *
- * This is a way to determine if the number of acceptable errors per time
- * period has been exceeded. It is not accurate as there are cases in which
- * this scheme will not work, for example if there are periodic occurrences
- * of errors that straddle updates to the start time. This scheme is
- * sufficient for our usage.
- *
- * To use, embed a 'struct edc' somewhere, initialize it with
- * edc_init() and when an error hits:
- *
- * if (do_something_fails_with_a_soft_error) {
- *        if (edc_inc(&my->edc, MAX_ERRORS, MAX_TIMEFRAME))
- *                Oops, hard error, do something about it
- *        else
- *                Retry or ignore, depending on whatever
- * }
- */
-static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
-{
-	unsigned long now;
-
-	now = jiffies;
-	if (time_after(now, edc->timestart + timeframe)) {
-		edc->errorcount = 1;
-		edc->timestart = now;
-	} else if (++edc->errorcount > max_err) {
-		edc->errorcount = 0;
-		edc->timestart = now;
-		return 1;
-	}
-	return 0;
-}
-
-/* Host-Device interface for USB */
-enum {
-	I2400M_USB_BOOT_RETRIES = 3,
-	I2400MU_MAX_NOTIFICATION_LEN = 256,
-	I2400MU_BLK_SIZE = 16,
-	I2400MU_PL_SIZE_MAX = 0x3EFF,
-
-	/* Device IDs */
-	USB_DEVICE_ID_I6050 = 0x0186,
-	USB_DEVICE_ID_I6050_2 = 0x0188,
-	USB_DEVICE_ID_I6150 = 0x07d6,
-	USB_DEVICE_ID_I6150_2 = 0x07d7,
-	USB_DEVICE_ID_I6150_3 = 0x07d9,
-	USB_DEVICE_ID_I6250 = 0x0187,
-};
-
-
-/**
- * struct i2400mu - descriptor for a USB connected i2400m
- *
- * @i2400m: bus-generic i2400m implementation; has to be first (see
- *     its documentation in i2400m.h).
- *
- * @usb_dev: pointer to our USB device
- *
- * @usb_iface: pointer to our USB interface
- *
- * @urb_edc: error density counter; used to keep a density-on-time tab
- *     on how many soft (retryable or ignorable) errors we get. If we
- *     go over the threshold, we consider the bus transport is failing
- *     too much and reset.
- *
- * @notif_urb: URB for receiving notifications from the device.
- *
- * @tx_kthread: thread we use for data TX. We use a thread because in
- *     order to do deep power saving and put the device to sleep, we
- *     need to call usb_autopm_*() [blocking functions].
- *
- * @tx_wq: waitqueue for the TX kthread to sleep when there is no data
- *     to be sent; when more data is available, it is woken up by
- *     i2400mu_bus_tx_kick().
- *
- * @rx_kthread: thread we use for data RX. We use a thread because in
- *     order to do deep power saving and put the device to sleep, we
- *     need to call usb_autopm_*() [blocking functions].
- *
- * @rx_wq: waitqueue for the RX kthread to sleep when there is no data
- *     to receive. When data is available, it is woken up by
- *     usb-notif.c:i2400mu_notification_grok().
- *
- * @rx_pending_count: number of rx-data-ready notifications that were
- *     still not handled by the RX kthread.
- *
- * @rx_size: current RX buffer size that is being used.
- *
- * @rx_size_acc: accumulator of the sizes of the previous read
- *     transactions.
- *
- * @rx_size_cnt: number of read transactions accumulated in
- *     @rx_size_acc.
- *
- * @do_autopm: disable(0)/enable(>0) calling the
- *     usb_autopm_get/put_interface() barriers when executing
- *     commands. See doc in i2400mu_suspend() for more information.
- *
- * @rx_size_auto_shrink: if true, the rx_size is shrunk
- *     automatically based on the average size of the received
- *     transactions.
This allows the receive code to allocate smaller - * chunks of memory and thus reduce pressure on the memory - * allocator by not wasting so much space. By default it is - * enabled. - * - * @debugfs_dentry: hookup for debugfs files. - * These have to be in a separate directory, a child of - * (wimax_dev->debugfs_dentry) so they can be removed when the - * module unloads, as we don't keep each dentry. - */ -struct i2400mu { - struct i2400m i2400m; /* FIRST! See doc */ - - struct usb_device *usb_dev; - struct usb_interface *usb_iface; - struct edc urb_edc; /* Error density counter */ - struct i2400m_endpoint_cfg endpoint_cfg; - - struct urb *notif_urb; - struct task_struct *tx_kthread; - wait_queue_head_t tx_wq; - - struct task_struct *rx_kthread; - wait_queue_head_t rx_wq; - atomic_t rx_pending_count; - size_t rx_size, rx_size_acc, rx_size_cnt; - atomic_t do_autopm; - u8 rx_size_auto_shrink; - - struct dentry *debugfs_dentry; - unsigned i6050:1; /* 1 if this is a 6050 based SKU */ -}; - - -static inline -void i2400mu_init(struct i2400mu *i2400mu) -{ - i2400m_init(&i2400mu->i2400m); - edc_init(&i2400mu->urb_edc); - init_waitqueue_head(&i2400mu->tx_wq); - atomic_set(&i2400mu->rx_pending_count, 0); - init_waitqueue_head(&i2400mu->rx_wq); - i2400mu->rx_size = PAGE_SIZE - sizeof(struct skb_shared_info); - atomic_set(&i2400mu->do_autopm, 1); - i2400mu->rx_size_auto_shrink = 1; -} - -int i2400mu_notification_setup(struct i2400mu *); -void i2400mu_notification_release(struct i2400mu *); - -int i2400mu_rx_setup(struct i2400mu *); -void i2400mu_rx_release(struct i2400mu *); -void i2400mu_rx_kick(struct i2400mu *); - -int i2400mu_tx_setup(struct i2400mu *); -void i2400mu_tx_release(struct i2400mu *); -void i2400mu_bus_tx_kick(struct i2400m *); - -ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *, - const struct i2400m_bootrom_header *, size_t, - int); -ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *, - struct i2400m_bootrom_header *, size_t); -#endif /* #ifndef __I2400M_USB_H__ */ diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h deleted file mode 100644 index a3733a6d14f5..000000000000 --- a/drivers/net/wimax/i2400m/i2400m.h +++ /dev/null @@ -1,970 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * Declarations for bus-generic internal APIs - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
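The edc_init()/edc_inc() pair declared in the header above is self-contained and meant to be embedded at whatever spot soft errors surface. A compilable rendition of the usage pattern its comment sketches (my_dev and both handler functions are hypothetical; the counter is assumed to have been edc_init()ed at probe time):

    /* Hypothetical consumer of the error density counter above. */
    struct my_dev {
            struct edc urb_edc;     /* edc_init()ed at probe time */
    };

    void my_dev_hard_error(struct my_dev *md);      /* hypothetical */
    void my_dev_retry(struct my_dev *md);           /* hypothetical */

    static void my_dev_soft_error(struct my_dev *md)
    {
            /* More than EDC_MAX_ERRORS soft errors within one
             * EDC_ERROR_TIMEFRAME window escalates to a hard error. */
            if (edc_inc(&md->urb_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
                    my_dev_hard_error(md);
            else
                    my_dev_retry(md);
    }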
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - Initial implementation - * - * - * GENERAL DRIVER ARCHITECTURE - * - * The i2400m driver is split in the following two major parts: - * - * - bus specific driver - * - bus generic driver (this part) - * - * The bus specific driver sets up stuff specific to the bus the - * device is connected to (USB, PCI, tam-tam...non-authoritative - * nor binding list) which is basically the device-model management - * (probe/disconnect, etc), moving data from device to kernel and - * back, doing the power saving details and reseting the device. - * - * For details on each bus-specific driver, see it's include file, - * i2400m-BUSNAME.h - * - * The bus-generic functionality break up is: - * - * - Firmware upload: fw.c - takes care of uploading firmware to the - * device. bus-specific driver just needs to provides a way to - * execute boot-mode commands and to reset the device. - * - * - RX handling: rx.c - receives data from the bus-specific code and - * feeds it to the network or WiMAX stack or uses it to modify - * the driver state. bus-specific driver only has to receive - * frames and pass them to this module. - * - * - TX handling: tx.c - manages the TX FIFO queue and provides means - * for the bus-specific TX code to pull data from the FIFO - * queue. bus-specific code just pulls frames from this module - * to sends them to the device. - * - * - netdev glue: netdev.c - interface with Linux networking - * stack. Pass around data frames, and configure when the - * device is up and running or shutdown (through ifconfig up / - * down). Bus-generic only. - * - * - control ops: control.c - implements various commands for - * controlling the device. bus-generic only. - * - * - device model glue: driver.c - implements helpers for the - * device-model glue done by the bus-specific layer - * (setup/release the driver resources), turning the device on - * and off, handling the device reboots/resets and a few simple - * WiMAX stack ops. - * - * Code is also broken up in linux-glue / device-glue. - * - * Linux glue contains functions that deal mostly with gluing with the - * rest of the Linux kernel. - * - * Device-glue are functions that deal mostly with the way the device - * does things and talk the device's language. - * - * device-glue code is licensed BSD so other open source OSes can take - * it to implement their drivers. - * - * - * APIs AND HEADER FILES - * - * This bus generic code exports three APIs: - * - * - HDI (host-device interface) definitions common to all busses - * (include/linux/wimax/i2400m.h); these can be also used by user - * space code. 
- * - internal API for the bus-generic code - * - external API for the bus-specific drivers - * - * - * LIFE CYCLE: - * - * When the bus-specific driver probes, it allocates a network device - * with enough space for it's data structue, that must contain a - * &struct i2400m at the top. - * - * On probe, it needs to fill the i2400m members marked as [fill], as - * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The - * i2400m driver will only register with the WiMAX and network stacks; - * the only access done to the device is to read the MAC address so we - * can register a network device. - * - * The high-level call flow is: - * - * bus_probe() - * i2400m_setup() - * i2400m->bus_setup() - * boot rom initialization / read mac addr - * network / WiMAX stacks registration - * i2400m_dev_start() - * i2400m->bus_dev_start() - * i2400m_dev_initialize() - * - * The reverse applies for a disconnect() call: - * - * bus_disconnect() - * i2400m_release() - * i2400m_dev_stop() - * i2400m_dev_shutdown() - * i2400m->bus_dev_stop() - * network / WiMAX stack unregistration - * i2400m->bus_release() - * - * At this point, control and data communications are possible. - * - * While the device is up, it might reset. The bus-specific driver has - * to catch that situation and call i2400m_dev_reset_handle() to deal - * with it (reset the internal driver structures and go back to square - * one). - */ - -#ifndef __I2400M_H__ -#define __I2400M_H__ - -#include <linux/usb.h> -#include <linux/netdevice.h> -#include <linux/completion.h> -#include <linux/rwsem.h> -#include <linux/atomic.h> -#include <net/wimax.h> -#include <linux/wimax/i2400m.h> -#include <asm/byteorder.h> - -enum { -/* netdev interface */ - /* - * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size - * - * The MTU is 1400 or less - */ - I2400M_MAX_MTU = 1400, -}; - -/* Misc constants */ -enum { - /* Size of the Boot Mode Command buffer */ - I2400M_BM_CMD_BUF_SIZE = 16 * 1024, - I2400M_BM_ACK_BUF_SIZE = 256, -}; - -enum { - /* Maximum number of bus reset can be retried */ - I2400M_BUS_RESET_RETRIES = 3, -}; - -/** - * struct i2400m_poke_table - Hardware poke table for the Intel 2400m - * - * This structure will be used to create a device specific poke table - * to put the device in a consistent state at boot time. - * - * @address: The device address to poke - * - * @data: The data value to poke to the device address - * - */ -struct i2400m_poke_table{ - __le32 address; - __le32 data; -}; - -#define I2400M_FW_POKE(a, d) { \ - .address = cpu_to_le32(a), \ - .data = cpu_to_le32(d) \ -} - - -/** - * i2400m_reset_type - methods to reset a device - * - * @I2400M_RT_WARM: Reset without device disconnection, device handles - * are kept valid but state is back to power on, with firmware - * re-uploaded. - * @I2400M_RT_COLD: Tell the device to disconnect itself from the bus - * and reconnect. Renders all device handles invalid. - * @I2400M_RT_BUS: Tells the bus to reset the device; last measure - * used when both types above don't work. - */ -enum i2400m_reset_type { - I2400M_RT_WARM, /* first measure */ - I2400M_RT_COLD, /* second measure */ - I2400M_RT_BUS, /* call in artillery */ -}; - -struct i2400m_reset_ctx; -struct i2400m_roq; -struct i2400m_barker_db; - -/** - * struct i2400m - descriptor for an Intel 2400m - * - * Members marked with [fill] must be filled out/initialized before - * calling i2400m_setup(). 
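The reset types above are deliberately ordered as an escalation ladder ("first measure", "second measure", "call in artillery"). A sketch of a recovery path walking that ladder with the i2400m_reset() wrapper declared further down in this header; this only illustrates the ordering, it is not the driver's actual recovery policy:

    /* Sketch: escalate warm -> cold -> bus until one reset succeeds. */
    static int my_escalating_reset(struct i2400m *i2400m)
    {
            static const enum i2400m_reset_type ladder[] = {
                    I2400M_RT_WARM, I2400M_RT_COLD, I2400M_RT_BUS,
            };
            int itr, ret = -EIO;

            for (itr = 0; itr < ARRAY_SIZE(ladder); itr++) {
                    ret = i2400m_reset(i2400m, ladder[itr]);
                    if (ret >= 0)
                            break;
            }
            return ret;
    }

In the driver itself much of this lives in the bus layer: the @bus_reset documentation below requires the bus-specific reset to act as the fallback when warm and cold resets fail.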
- *
- * Note the @bus_setup/@bus_release, @bus_dev_start/@bus_dev_stop
- * call pairs are very much doing almost the same, and depending on
- * the underlying bus, some stuff has to be put in one or the
- * other. The idea of setup/release is that they set up the minimal
- * amount needed for loading firmware, whereas dev_start/stop set up
- * the rest needed to do full data/control traffic.
- *
- * @bus_tx_block_size: [fill] USB imposes a 16-byte block size, but
- *     other busses will differ. So we have a tx_blk_size variable that
- *     the bus layer sets to tell the engine how much of that we need.
- *
- * @bus_tx_room_min: [fill] Minimum room required while allocating
- *     TX queue's buffer space for message header. USB requires
- *     16 bytes. Refer to bus specific driver code for details.
- *
- * @bus_pl_size_max: [fill] Maximum payload size.
- *
- * @bus_setup: [optional fill] Function called by the bus-generic code
- *     [i2400m_setup()] to set up the basic bus-specific communications
- *     to the device needed to load firmware. See LIFE CYCLE above.
- *
- *     NOTE: Doesn't need to upload the firmware, as that is taken
- *     care of by the bus-generic code.
- *
- * @bus_release: [optional fill] Function called by the bus-generic
- *     code [i2400m_release()] to shut down the basic bus-specific
- *     communications to the device needed to load firmware. See
- *     LIFE CYCLE above.
- *
- *     This function does not need to reset the device, just tear down
- *     all the host resources created to handle communication with
- *     the device.
- *
- * @bus_dev_start: [optional fill] Function called by the bus-generic
- *     code [i2400m_dev_start()] to do things needed to start the
- *     device. See LIFE CYCLE above.
- *
- *     NOTE: Doesn't need to upload the firmware, as that is taken
- *     care of by the bus-generic code.
- *
- * @bus_dev_stop: [optional fill] Function called by the bus-generic
- *     code [i2400m_dev_stop()] to do things needed for stopping the
- *     device. See LIFE CYCLE above.
- *
- *     This function does not need to reset the device, just tear down
- *     all the host resources created to handle communication with
- *     the device.
- *
- * @bus_tx_kick: [fill] Function called by the bus-generic code to let
- *     the bus-specific code know that there is data available in the
- *     TX FIFO for transmission to the device.
- *
- *     This function cannot sleep.
- *
- * @bus_reset: [fill] Function called by the bus-generic code to reset
- *     the device in various ways. Doesn't need to wait for the
- *     reset to finish.
- *
- *     If warm or cold reset fails, this function is expected to do a
- *     bus-specific reset (eg: USB reset) to get the device to a
- *     working state (even if it implies device disconnection).
- *
- *     Note the warm reset is used by the firmware uploader to
- *     reinitialize the device.
- *
- *     IMPORTANT: this is called very early in the device setup
- *     process, so it cannot rely on common infrastructure being laid
- *     out.
- *
- *     IMPORTANT: don't call reset on RT_BUS with i2400m->init_mutex
- *     held, as the .pre/.post reset handlers will deadlock.
- *
- * @bus_bm_retries: [fill] How many times shall a firmware upload /
- *     device initialization be retried? Different models of the same
- *     device might need different values, hence it is set by the
- *     bus-specific driver.
Note this value is used in two places, - * i2400m_fw_dnload() and __i2400m_dev_start(); they won't become - * multiplicative (__i2400m_dev_start() calling N times - * i2400m_fw_dnload() and this trying N times to download the - * firmware), as if __i2400m_dev_start() only retries if the - * firmware crashed while initializing the device (not in a - * general case). - * - * @bus_bm_cmd_send: [fill] Function called to send a boot-mode - * command. Flags are defined in 'enum i2400m_bm_cmd_flags'. This - * is synchronous and has to return 0 if ok or < 0 errno code in - * any error condition. - * - * @bus_bm_wait_for_ack: [fill] Function called to wait for a - * boot-mode notification (that can be a response to a previously - * issued command or an asynchronous one). Will read until all the - * indicated size is read or timeout. Reading more or less data - * than asked for is an error condition. Return 0 if ok, < 0 errno - * code on error. - * - * The caller to this function will check if the response is a - * barker that indicates the device going into reset mode. - * - * @bus_fw_names: [fill] a NULL-terminated array with the names of the - * firmware images to try loading. This is made a list so we can - * support backward compatibility of firmware releases (eg: if we - * can't find the default v1.4, we try v1.3). In general, the name - * should be i2400m-fw-X-VERSION.sbcf, where X is the bus name. - * The list is tried in order and the first one that loads is - * used. The fw loader will set i2400m->fw_name to point to the - * active firmware image. - * - * @bus_bm_mac_addr_impaired: [fill] Set to true if the device's MAC - * address provided in boot mode is kind of broken and needs to - * be re-read later on. - * - * @bus_bm_pokes_table: [fill/optional] A table of device addresses - * and values that will be poked at device init time to move the - * device to the correct state for the type of boot/firmware being - * used. This table MUST be terminated with (0x000000, - * 0x00000000) or bad things will happen. - * - * - * @wimax_dev: WiMAX generic device for linkage into the kernel WiMAX - * stack. Due to the way a net_device is allocated, we need to - * force this to be the first field so that we can get from - * netdev_priv() the right pointer. - * - * @updown: the device is up and ready for transmitting control and - * data packets. This implies @ready (communication infrastructure - * with the device is ready) and the device's firmware has been - * loaded and the device initialized. - * - * Write to it only inside a i2400m->init_mutex protected area - * followed with a wmb(); rmb() before accesing (unless locked - * inside i2400m->init_mutex). Read access can be loose like that - * [just using rmb()] because the paths that use this also do - * other error checks later on. - * - * @ready: Communication infrastructure with the device is ready, data - * frames can start to be passed around (this is lighter than - * using the WiMAX state for certain hot paths). - * - * Write to it only inside a i2400m->init_mutex protected area - * followed with a wmb(); rmb() before accesing (unless locked - * inside i2400m->init_mutex). Read access can be loose like that - * [just using rmb()] because the paths that use this also do - * other error checks later on. - * - * @rx_reorder: 1 if RX reordering is enabled; this can only be - * set at probe time. 
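Taken together, the [fill] members documented above are the whole contract a bus driver must satisfy before calling i2400m_setup(). A skeleton of a hypothetical probe doing so (all my_* names are placeholders, optional members elided; note the mandatory all-zeroes terminator on the poke table and the NULL terminator on the firmware list):

    /* Illustrative poke table; the address/value pairs are made up. */
    static const struct i2400m_poke_table my_pokes[] = {
            I2400M_FW_POKE(0x6003d000, 0x00000001),
            I2400M_FW_POKE(0x0, 0x0),       /* MUST terminate the table */
    };

    static const char *my_fw_names[] = {
            "i2400m-fw-mybus-1.5.sbcf",     /* placeholder names */
            "i2400m-fw-mybus-1.4.sbcf",     /* backward-compat fallback */
            NULL,
    };

    struct my_dev {
            struct i2400m i2400m;           /* must be first */
    };

    static int my_bus_probe(struct my_dev *md)
    {
            struct i2400m *i2400m = &md->i2400m;

            i2400m_init(i2400m);
            i2400m->bus_tx_block_size = 16;
            i2400m->bus_tx_room_min = 16;
            i2400m->bus_pl_size_max = 0x3EFF;
            i2400m->bus_bm_retries = 3;
            i2400m->bus_dev_start = my_dev_start;
            i2400m->bus_dev_stop = my_dev_stop;
            i2400m->bus_tx_kick = my_tx_kick;
            i2400m->bus_reset = my_reset;
            i2400m->bus_bm_cmd_send = my_bm_cmd_send;
            i2400m->bus_bm_wait_for_ack = my_bm_wait_for_ack;
            i2400m->bus_fw_names = my_fw_names;
            i2400m->bus_bm_pokes_table = my_pokes;

            /* the real bus drivers also set i2400m->wimax_dev.net_dev
             * before this call */
            return i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
    }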
- * - * @state: device's state (as reported by it) - * - * @state_wq: waitqueue that is woken up whenever the state changes - * - * @tx_lock: spinlock to protect TX members - * - * @tx_buf: FIFO buffer for TX; we queue data here - * - * @tx_in: FIFO index for incoming data. Note this doesn't wrap around - * and it is always greater than @tx_out. - * - * @tx_out: FIFO index for outgoing data - * - * @tx_msg: current TX message that is active in the FIFO for - * appending payloads. - * - * @tx_sequence: current sequence number for TX messages from the - * device to the host. - * - * @tx_msg_size: size of the current message being transmitted by the - * bus-specific code. - * - * @tx_pl_num: total number of payloads sent - * - * @tx_pl_max: maximum number of payloads sent in a TX message - * - * @tx_pl_min: minimum number of payloads sent in a TX message - * - * @tx_num: number of TX messages sent - * - * @tx_size_acc: number of bytes in all TX messages sent - * (this is different to net_dev's statistics as it also counts - * control messages). - * - * @tx_size_min: smallest TX message sent. - * - * @tx_size_max: biggest TX message sent. - * - * @rx_lock: spinlock to protect RX members and rx_roq_refcount. - * - * @rx_pl_num: total number of payloads received - * - * @rx_pl_max: maximum number of payloads received in a RX message - * - * @rx_pl_min: minimum number of payloads received in a RX message - * - * @rx_num: number of RX messages received - * - * @rx_size_acc: number of bytes in all RX messages received - * (this is different to net_dev's statistics as it also counts - * control messages). - * - * @rx_size_min: smallest RX message received. - * - * @rx_size_max: buggest RX message received. - * - * @rx_roq: RX ReOrder queues. (fw >= v1.4) When packets are received - * out of order, the device will ask the driver to hold certain - * packets until the ones that are received out of order can be - * delivered. Then the driver can release them to the host. See - * drivers/net/i2400m/rx.c for details. - * - * @rx_roq_refcount: refcount rx_roq. This refcounts any access to - * rx_roq thus preventing rx_roq being destroyed when rx_roq - * is being accessed. rx_roq_refcount is protected by rx_lock. - * - * @rx_reports: reports received from the device that couldn't be - * processed because the driver wasn't still ready; when ready, - * they are pulled from here and chewed. - * - * @rx_reports_ws: Work struct used to kick a scan of the RX reports - * list and to process each. - * - * @src_mac_addr: MAC address used to make ethernet packets be coming - * from. This is generated at i2400m_setup() time and used during - * the life cycle of the instance. See i2400m_fake_eth_header(). - * - * @init_mutex: Mutex used for serializing the device bringup - * sequence; this way if the device reboots in the middle, we - * don't try to do a bringup again while we are tearing down the - * one that failed. - * - * Can't reuse @msg_mutex because from within the bringup sequence - * we need to send messages to the device and thus use @msg_mutex. - * - * @msg_mutex: mutex used to send control commands to the device (we - * only allow one at a time, per host-device interface design). - * - * @msg_completion: used to wait for an ack to a control command sent - * to the device. - * - * @ack_skb: used to store the actual ack to a control command if the - * reception of the command was successful. Otherwise, a ERR_PTR() - * errno code that indicates what failed with the ack reception. 
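A small note on the @tx_in/@tx_out convention above: because the indexes are free-running (they never wrap) with @tx_in always greater than or equal to @tx_out, fill level and buffer position are plain arithmetic. A minimal sketch of that accounting, independent of the driver's actual TX code:

    /* Sketch: free-running FIFO indexes; in >= out always holds. */
    static size_t fifo_used(size_t in, size_t out)
    {
            return in - out;                /* bytes currently queued */
    }

    static size_t fifo_pos(size_t index, size_t buf_size)
    {
            return index % buf_size;        /* offset into the buffer */
    }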
- * - * Only valid after @msg_completion is woken up. Only updateable - * if @msg_completion is armed. Only touched by - * i2400m_msg_to_dev(). - * - * Protected by @rx_lock. In theory the command execution flow is - * sequential, but in case the device sends an out-of-phase or - * very delayed response, we need to avoid it trampling current - * execution. - * - * @bm_cmd_buf: boot mode command buffer for composing firmware upload - * commands. - * - * USB can't r/w to stack, vmalloc, etc...as well, we end up - * having to alloc/free a lot to compose commands, so we use these - * for stagging and not having to realloc all the time. - * - * This assumes the code always runs serialized. Only one thread - * can call i2400m_bm_cmd() at the same time. - * - * @bm_ack_buf: boot mode acknoledge buffer for staging reception of - * responses to commands. - * - * See @bm_cmd_buf. - * - * @work_queue: work queue for processing device reports. This - * workqueue cannot be used for processing TX or RX to the device, - * as from it we'll process device reports, which might require - * further communication with the device. - * - * @debugfs_dentry: hookup for debugfs files. - * These have to be in a separate directory, a child of - * (wimax_dev->debugfs_dentry) so they can be removed when the - * module unloads, as we don't keep each dentry. - * - * @fw_name: name of the firmware image that is currently being used. - * - * @fw_version: version of the firmware interface, Major.minor, - * encoded in the high word and low word (major << 16 | minor). - * - * @fw_hdrs: NULL terminated array of pointers to the firmware - * headers. This is only available during firmware load time. - * - * @fw_cached: Used to cache firmware when the system goes to - * suspend/standby/hibernation (as on resume we can't read it). If - * NULL, no firmware was cached, read it. If ~0, you can't read - * any firmware files (the system still didn't come out of suspend - * and failed to cache one), so abort; otherwise, a valid cached - * firmware to be used. Access to this variable is protected by - * the spinlock i2400m->rx_lock. - * - * @barker: barker type that the device uses; this is initialized by - * i2400m_is_boot_barker() the first time it is called. Then it - * won't change during the life cycle of the device and every time - * a boot barker is received, it is just verified for it being the - * same. - * - * @pm_notifier: used to register for PM events - * - * @bus_reset_retries: counter for the number of bus resets attempted for - * this boot. It's not for tracking the number of bus resets during - * the whole driver life cycle (from insmod to rmmod) but for the - * number of dev_start() executed until dev_start() returns a success - * (ie: a good boot means a dev_stop() followed by a successful - * dev_start()). dev_reset_handler() increments this counter whenever - * it is triggering a bus reset. It checks this counter to decide if a - * subsequent bus reset should be retried. dev_reset_handler() retries - * the bus reset until dev_start() succeeds or the counter reaches - * I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in - * dev_reset_handle() when dev_start() returns a success, - * ie: a successul boot is completed. - * - * @alive: flag to denote if the device *should* be alive. This flag is - * everything like @updown (see doc for @updown) except reflecting - * the device state *we expect* rather than the actual state as denoted - * by @updown. It is set 1 whenever @updown is set 1 in dev_start(). 
- * Then the device is expected to be alive all the time - * (i2400m->alive remains 1) until the driver is removed. Therefore - * all the device reboot events detected can be still handled properly - * by either dev_reset_handle() or .pre_reset/.post_reset as long as - * the driver presents. It is set 0 along with @updown in dev_stop(). - * - * @error_recovery: flag to denote if we are ready to take an error recovery. - * 0 for ready to take an error recovery; 1 for not ready. It is - * initialized to 1 while probe() since we don't tend to take any error - * recovery during probe(). It is decremented by 1 whenever dev_start() - * succeeds to indicate we are ready to take error recovery from now on. - * It is checked every time we wanna schedule an error recovery. If an - * error recovery is already in place (error_recovery was set 1), we - * should not schedule another one until the last one is done. - */ -struct i2400m { - struct wimax_dev wimax_dev; /* FIRST! See doc */ - - unsigned updown:1; /* Network device is up or down */ - unsigned boot_mode:1; /* is the device in boot mode? */ - unsigned sboot:1; /* signed or unsigned fw boot */ - unsigned ready:1; /* Device comm infrastructure ready */ - unsigned rx_reorder:1; /* RX reorder is enabled */ - u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */ - /* typed u8 so /sys/kernel/debug/u8 can tweak */ - enum i2400m_system_state state; - wait_queue_head_t state_wq; /* Woken up when on state updates */ - - size_t bus_tx_block_size; - size_t bus_tx_room_min; - size_t bus_pl_size_max; - unsigned bus_bm_retries; - - int (*bus_setup)(struct i2400m *); - int (*bus_dev_start)(struct i2400m *); - void (*bus_dev_stop)(struct i2400m *); - void (*bus_release)(struct i2400m *); - void (*bus_tx_kick)(struct i2400m *); - int (*bus_reset)(struct i2400m *, enum i2400m_reset_type); - ssize_t (*bus_bm_cmd_send)(struct i2400m *, - const struct i2400m_bootrom_header *, - size_t, int flags); - ssize_t (*bus_bm_wait_for_ack)(struct i2400m *, - struct i2400m_bootrom_header *, size_t); - const char **bus_fw_names; - unsigned bus_bm_mac_addr_impaired:1; - const struct i2400m_poke_table *bus_bm_pokes_table; - - spinlock_t tx_lock; /* protect TX state */ - void *tx_buf; - size_t tx_in, tx_out; - struct i2400m_msg_hdr *tx_msg; - size_t tx_sequence, tx_msg_size; - /* TX stats */ - unsigned tx_pl_num, tx_pl_max, tx_pl_min, - tx_num, tx_size_acc, tx_size_min, tx_size_max; - - /* RX stuff */ - /* protect RX state and rx_roq_refcount */ - spinlock_t rx_lock; - unsigned rx_pl_num, rx_pl_max, rx_pl_min, - rx_num, rx_size_acc, rx_size_min, rx_size_max; - struct i2400m_roq *rx_roq; /* access is refcounted */ - struct kref rx_roq_refcount; /* refcount access to rx_roq */ - u8 src_mac_addr[ETH_HLEN]; - struct list_head rx_reports; /* under rx_lock! 
*/ - struct work_struct rx_report_ws; - - struct mutex msg_mutex; /* serialize command execution */ - struct completion msg_completion; - struct sk_buff *ack_skb; /* protected by rx_lock */ - - void *bm_ack_buf; /* for receiving acks over USB */ - void *bm_cmd_buf; /* for issuing commands over USB */ - - struct workqueue_struct *work_queue; - - struct mutex init_mutex; /* protect bringup seq */ - struct i2400m_reset_ctx *reset_ctx; /* protected by init_mutex */ - - struct work_struct wake_tx_ws; - struct sk_buff *wake_tx_skb; - - struct work_struct reset_ws; - const char *reset_reason; - - struct work_struct recovery_ws; - - struct dentry *debugfs_dentry; - const char *fw_name; /* name of the current firmware image */ - unsigned long fw_version; /* version of the firmware interface */ - const struct i2400m_bcf_hdr **fw_hdrs; - struct i2400m_fw *fw_cached; /* protected by rx_lock */ - struct i2400m_barker_db *barker; - - struct notifier_block pm_notifier; - - /* counting bus reset retries in this boot */ - atomic_t bus_reset_retries; - - /* if the device is expected to be alive */ - unsigned alive; - - /* 0 if we are ready for error recovery; 1 if not ready */ - atomic_t error_recovery; - -}; - - -/* - * Bus-generic internal APIs - * ------------------------- - */ - -static inline -struct i2400m *wimax_dev_to_i2400m(struct wimax_dev *wimax_dev) -{ - return container_of(wimax_dev, struct i2400m, wimax_dev); -} - -static inline -struct i2400m *net_dev_to_i2400m(struct net_device *net_dev) -{ - return wimax_dev_to_i2400m(netdev_priv(net_dev)); -} - -/* - * Boot mode support - */ - -/** - * i2400m_bm_cmd_flags - flags to i2400m_bm_cmd() - * - * @I2400M_BM_CMD_RAW: send the command block as-is, without doing any - * extra processing for adding CRC. - */ -enum i2400m_bm_cmd_flags { - I2400M_BM_CMD_RAW = 1 << 2, -}; - -/** - * i2400m_bri - Boot-ROM indicators - * - * Flags for i2400m_bootrom_init() and i2400m_dev_bootstrap() [which - * are passed from things like i2400m_setup()]. Can be combined with - * |. - * - * @I2400M_BRI_SOFT: The device rebooted already and a reboot - * barker received, proceed directly to ack the boot sequence. - * @I2400M_BRI_NO_REBOOT: Do not reboot the device and proceed - * directly to wait for a reboot barker from the device. - * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot - * rom after reading the MAC address. This is quite a dirty hack, - * if you ask me -- the device requires the bootrom to be - * initialized after reading the MAC address. - */ -enum i2400m_bri { - I2400M_BRI_SOFT = 1 << 1, - I2400M_BRI_NO_REBOOT = 1 << 2, - I2400M_BRI_MAC_REINIT = 1 << 3, -}; - -void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *); -int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri); -int i2400m_read_mac_addr(struct i2400m *); -int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri); -int i2400m_is_boot_barker(struct i2400m *, const void *, size_t); -static inline -int i2400m_is_d2h_barker(const void *buf) -{ - const __le32 *barker = buf; - return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER; -} -void i2400m_unknown_barker(struct i2400m *, const void *, size_t); - -/* Make/grok boot-rom header commands */ - -static inline -__le32 i2400m_brh_command(enum i2400m_brh_opcode opcode, unsigned use_checksum, - unsigned direct_access) -{ - return cpu_to_le32( - I2400M_BRH_SIGNATURE - | (direct_access ? I2400M_BRH_DIRECT_ACCESS : 0) - | I2400M_BRH_RESPONSE_REQUIRED /* response always required */ - | (use_checksum ? 
I2400M_BRH_USE_CHECKSUM : 0) - | (opcode & I2400M_BRH_OPCODE_MASK)); -} - -static inline -void i2400m_brh_set_opcode(struct i2400m_bootrom_header *hdr, - enum i2400m_brh_opcode opcode) -{ - hdr->command = cpu_to_le32( - (le32_to_cpu(hdr->command) & ~I2400M_BRH_OPCODE_MASK) - | (opcode & I2400M_BRH_OPCODE_MASK)); -} - -static inline -unsigned i2400m_brh_get_opcode(const struct i2400m_bootrom_header *hdr) -{ - return le32_to_cpu(hdr->command) & I2400M_BRH_OPCODE_MASK; -} - -static inline -unsigned i2400m_brh_get_response(const struct i2400m_bootrom_header *hdr) -{ - return (le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_MASK) - >> I2400M_BRH_RESPONSE_SHIFT; -} - -static inline -unsigned i2400m_brh_get_use_checksum(const struct i2400m_bootrom_header *hdr) -{ - return le32_to_cpu(hdr->command) & I2400M_BRH_USE_CHECKSUM; -} - -static inline -unsigned i2400m_brh_get_response_required( - const struct i2400m_bootrom_header *hdr) -{ - return le32_to_cpu(hdr->command) & I2400M_BRH_RESPONSE_REQUIRED; -} - -static inline -unsigned i2400m_brh_get_direct_access(const struct i2400m_bootrom_header *hdr) -{ - return le32_to_cpu(hdr->command) & I2400M_BRH_DIRECT_ACCESS; -} - -static inline -unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr) -{ - return (le32_to_cpu(hdr->command) & I2400M_BRH_SIGNATURE_MASK) - >> I2400M_BRH_SIGNATURE_SHIFT; -} - - -/* - * Driver / device setup and internal functions - */ -void i2400m_init(struct i2400m *); -int i2400m_reset(struct i2400m *, enum i2400m_reset_type); -void i2400m_netdev_setup(struct net_device *net_dev); -int i2400m_sysfs_setup(struct device_driver *); -void i2400m_sysfs_release(struct device_driver *); -int i2400m_tx_setup(struct i2400m *); -void i2400m_wake_tx_work(struct work_struct *); -void i2400m_tx_release(struct i2400m *); - -int i2400m_rx_setup(struct i2400m *); -void i2400m_rx_release(struct i2400m *); - -void i2400m_fw_cache(struct i2400m *); -void i2400m_fw_uncache(struct i2400m *); - -void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *, - int); -void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs); -void i2400m_net_wake_stop(struct i2400m *); -enum i2400m_pt; -int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt); - -#ifdef CONFIG_DEBUG_FS -void i2400m_debugfs_add(struct i2400m *); -void i2400m_debugfs_rm(struct i2400m *); -#else -static inline void i2400m_debugfs_add(struct i2400m *i2400m) {} -static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {} -#endif - -/* Initialize/shutdown the device */ -int i2400m_dev_initialize(struct i2400m *); -void i2400m_dev_shutdown(struct i2400m *); - -extern struct attribute_group i2400m_dev_attr_group; - - -/* HDI message's payload description handling */ - -static inline -size_t i2400m_pld_size(const struct i2400m_pld *pld) -{ - return I2400M_PLD_SIZE_MASK & le32_to_cpu(pld->val); -} - -static inline -enum i2400m_pt i2400m_pld_type(const struct i2400m_pld *pld) -{ - return (I2400M_PLD_TYPE_MASK & le32_to_cpu(pld->val)) - >> I2400M_PLD_TYPE_SHIFT; -} - -static inline -void i2400m_pld_set(struct i2400m_pld *pld, size_t size, - enum i2400m_pt type) -{ - pld->val = cpu_to_le32( - ((type << I2400M_PLD_TYPE_SHIFT) & I2400M_PLD_TYPE_MASK) - | (size & I2400M_PLD_SIZE_MASK)); -} - - -/* - * API for the bus-specific drivers - * -------------------------------- - */ - -static inline -struct i2400m *i2400m_get(struct i2400m *i2400m) -{ - dev_hold(i2400m->wimax_dev.net_dev); - return i2400m; -} - -static inline -void i2400m_put(struct 
i2400m *i2400m) -{ - dev_put(i2400m->wimax_dev.net_dev); -} - -int i2400m_dev_reset_handle(struct i2400m *, const char *); -int i2400m_pre_reset(struct i2400m *); -int i2400m_post_reset(struct i2400m *); -void i2400m_error_recovery(struct i2400m *); - -/* - * _setup()/_release() are called by the probe/disconnect functions of - * the bus-specific drivers. - */ -int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags); -void i2400m_release(struct i2400m *); - -int i2400m_rx(struct i2400m *, struct sk_buff *); -struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); -void i2400m_tx_msg_sent(struct i2400m *); - - -/* - * Utility functions - */ - -static inline -struct device *i2400m_dev(struct i2400m *i2400m) -{ - return i2400m->wimax_dev.net_dev->dev.parent; -} - -int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t); -int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *, - size_t); -struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t); -void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int); -void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *, - size_t); -void i2400m_report_hook_work(struct work_struct *); -int i2400m_cmd_enter_powersave(struct i2400m *); -int i2400m_cmd_exit_idle(struct i2400m *); -struct sk_buff *i2400m_get_device_info(struct i2400m *); -int i2400m_firmware_check(struct i2400m *); -int i2400m_set_idle_timeout(struct i2400m *, unsigned); - -static inline -struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep) -{ - return &iface->cur_altsetting->endpoint[ep].desc; -} - -int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state); -void i2400m_report_tlv_rf_switches_status(struct i2400m *, - const struct i2400m_tlv_rf_switches_status *); - -/* - * Helpers for firmware backwards compatibility - * - * As we aim to support at least the firmware version that was - * released with the previous kernel/driver release, some code will be - * conditionally executed depending on the firmware version. On each - * release, the code to support fw releases past the last two ones - * will be purged. - * - * By making it depend on this macros, it is easier to keep it a tab - * on what has to go and what not. - */ -static inline -unsigned i2400m_le_v1_3(struct i2400m *i2400m) -{ - /* running fw is lower or v1.3 */ - return i2400m->fw_version <= 0x00090001; -} - -static inline -unsigned i2400m_ge_v1_4(struct i2400m *i2400m) -{ - /* running fw is higher or v1.4 */ - return i2400m->fw_version >= 0x00090002; -} - - -/* - * Do a millisecond-sleep for allowing wireshark to dump all the data - * packets. Used only for debugging. - */ -static inline -void __i2400m_msleep(unsigned ms) -{ -#if 1 -#else - msleep(ms); -#endif -} - - -/* module initialization helpers */ -int i2400m_barker_db_init(const char *); -void i2400m_barker_db_exit(void); - - - -#endif /* #ifndef __I2400M_H__ */ diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c deleted file mode 100644 index a7fcbceb6e6b..000000000000 --- a/drivers/net/wimax/i2400m/netdev.c +++ /dev/null @@ -1,603 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless WiMAX Connection 2400m - * Glue with the networking stack - * - * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * This implements an ethernet device for the i2400m. 
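A note on the firmware compatibility helpers just above: @fw_version packs the interface version as (major << 16 | minor), so the 0x00090001 and 0x00090002 constants are interface versions 9.1 and 9.2, which the comments map to the v1.3 and v1.4 firmware releases. A tiny sketch of the packing:

    /* Sketch: pack an interface version the way @fw_version stores it. */
    static inline unsigned long fw_if_version(unsigned major, unsigned minor)
    {
            return ((unsigned long)major << 16) | minor;
    }

    /* fw_if_version(9, 2) == 0x00090002, the v1.4 cutoff used above */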
- * - * We fake being an ethernet device to simplify the support from user - * space and from the other side. The world is (sadly) configured to - * take in only Ethernet devices... - * - * Because of this, when using firmwares <= v1.3, there is an - * copy-each-rxed-packet overhead on the RX path. Each IP packet has - * to be reallocated to add an ethernet header (as there is no space - * in what we get from the device). This is a known drawback and - * firmwares >= 1.4 add header space that can be used to insert the - * ethernet header without having to reallocate and copy. - * - * TX error handling is tricky; because we have to FIFO/queue the - * buffers for transmission (as the hardware likes it aggregated), we - * just give the skb to the TX subsystem and by the time it is - * transmitted, we have long forgotten about it. So we just don't care - * too much about it. - * - * Note that when the device is in idle mode with the basestation, we - * need to negotiate coming back up online. That involves negotiation - * and possible user space interaction. Thus, we defer to a workqueue - * to do all that. By default, we only queue a single packet and drop - * the rest, as potentially the time to go back from idle to normal is - * long. - * - * ROADMAP - * - * i2400m_open Called on ifconfig up - * i2400m_stop Called on ifconfig down - * - * i2400m_hard_start_xmit Called by the network stack to send a packet - * i2400m_net_wake_tx Wake up device from basestation-IDLE & TX - * i2400m_wake_tx_work - * i2400m_cmd_exit_idle - * i2400m_tx - * i2400m_net_tx TX a data frame - * i2400m_tx - * - * i2400m_change_mtu Called on ifconfig mtu XXX - * - * i2400m_tx_timeout Called when the device times out - * - * i2400m_net_rx Called by the RX code when a data frame is - * available (firmware <= 1.3) - * i2400m_net_erx Called by the RX code when a data frame is - * available (firmware >= 1.4). - * i2400m_netdev_setup Called to setup all the netdev stuff from - * alloc_netdev. - */ -#include <linux/if_arp.h> -#include <linux/slab.h> -#include <linux/netdevice.h> -#include <linux/ethtool.h> -#include <linux/export.h> -#include "i2400m.h" - - -#define D_SUBMODULE netdev -#include "debug-levels.h" - -enum { -/* netdev interface */ - /* 20 secs? yep, this is the maximum timeout that the device - * might take to get out of IDLE / negotiate it with the base - * station. We add 1sec for good measure. */ - I2400M_TX_TIMEOUT = 21 * HZ, - /* - * Experimentation has determined that, 20 to be a good value - * for minimizing the jitter in the throughput. - */ - I2400M_TX_QLEN = 20, -}; - - -static -int i2400m_open(struct net_device *net_dev) -{ - int result; - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); - /* Make sure we wait until init is complete... 
*/ - mutex_lock(&i2400m->init_mutex); - if (i2400m->updown) - result = 0; - else - result = -EBUSY; - mutex_unlock(&i2400m->init_mutex); - d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", - net_dev, i2400m, result); - return result; -} - - -static -int i2400m_stop(struct net_device *net_dev) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); - i2400m_net_wake_stop(i2400m); - d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m); - return 0; -} - - -/* - * Wake up the device and transmit a held SKB, then restart the net queue - * - * When the device goes into basestation-idle mode, we need to tell it - * to exit that mode; it will negotiate with the base station, user - * space may have to intervene to rehandshake crypto and then tell us - * when it is ready to transmit the packet we have "queued". Still we - * need to give it sometime after it reports being ok. - * - * On error, there is not much we can do. If the error was on TX, we - * still wake the queue up to see if the next packet will be luckier. - * - * If _cmd_exit_idle() fails...well, it could be many things; most - * commonly it is that something else took the device out of IDLE mode - * (for example, the base station). In that case we get an -EILSEQ and - * we are just going to ignore that one. If the device is back to - * connected, then fine -- if it is someother state, the packet will - * be dropped anyway. - */ -void i2400m_wake_tx_work(struct work_struct *ws) -{ - int result; - struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *skb; - unsigned long flags; - - spin_lock_irqsave(&i2400m->tx_lock, flags); - skb = i2400m->wake_tx_skb; - i2400m->wake_tx_skb = NULL; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - - d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb); - result = -EINVAL; - if (skb == NULL) { - dev_err(dev, "WAKE&TX: skb disappeared!\n"); - goto out_put; - } - /* If we have, somehow, lost the connection after this was - * queued, don't do anything; this might be the device got - * reset or just disconnected. */ - if (unlikely(!netif_carrier_ok(net_dev))) - goto out_kfree; - result = i2400m_cmd_exit_idle(i2400m); - if (result == -EILSEQ) - result = 0; - if (result < 0) { - dev_err(dev, "WAKE&TX: device didn't get out of idle: " - "%d - resetting\n", result); - i2400m_reset(i2400m, I2400M_RT_BUS); - goto error; - } - result = wait_event_timeout(i2400m->state_wq, - i2400m->state != I2400M_SS_IDLE, - net_dev->watchdog_timeo - HZ/2); - if (result == 0) - result = -ETIMEDOUT; - if (result < 0) { - dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: " - "%d - resetting\n", result); - i2400m_reset(i2400m, I2400M_RT_BUS); - goto error; - } - msleep(20); /* device still needs some time or it drops it */ - result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); -error: - netif_wake_queue(net_dev); -out_kfree: - kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ -out_put: - i2400m_put(i2400m); - d_fnend(3, dev, "(ws %p i2400m %p skb %p) = void [%d]\n", - ws, i2400m, skb, result); -} - - -/* - * Prepare the data payload TX header - * - * The i2400m expects a 4 byte header in front of a data packet. - * - * Because we pretend to be an ethernet device, this packet comes with - * an ethernet header. 
Pull it and push our header. - */ -static -void i2400m_tx_prep_header(struct sk_buff *skb) -{ - struct i2400m_pl_data_hdr *pl_hdr; - skb_pull(skb, ETH_HLEN); - pl_hdr = skb_push(skb, sizeof(*pl_hdr)); - pl_hdr->reserved = 0; -} - - - -/* - * Cleanup resources acquired during i2400m_net_wake_tx() - * - * This is called by __i2400m_dev_stop and means we have to make sure - * the workqueue is flushed from any pending work. - */ -void i2400m_net_wake_stop(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *wake_tx_skb; - unsigned long flags; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - /* - * See i2400m_hard_start_xmit(), references are taken there and - * here we release them if the packet was still pending. - */ - cancel_work_sync(&i2400m->wake_tx_ws); - - spin_lock_irqsave(&i2400m->tx_lock, flags); - wake_tx_skb = i2400m->wake_tx_skb; - i2400m->wake_tx_skb = NULL; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - - if (wake_tx_skb) { - i2400m_put(i2400m); - kfree_skb(wake_tx_skb); - } - - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} - - -/* - * TX an skb to an idle device - * - * When the device is in basestation-idle mode, we need to wake it up - * and then TX. So we queue a work_struct for doing so. - * - * We need to get an extra ref for the skb (so it is not dropped), as - * well as be careful not to queue more than one request (won't help - * at all). If more than one request comes or there are errors, we - * just drop the packets (see i2400m_hard_start_xmit()). - */ -static -int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev, - struct sk_buff *skb) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - unsigned long flags; - - d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); - if (net_ratelimit()) { - d_printf(3, dev, "WAKE&NETTX: " - "skb %p sending %d bytes to radio\n", - skb, skb->len); - d_dump(4, dev, skb->data, skb->len); - } - /* We hold a ref count for i2400m and skb, so when - * stopping() the device, we need to cancel that work - * and if pending, release those resources. */ - result = 0; - spin_lock_irqsave(&i2400m->tx_lock, flags); - if (!i2400m->wake_tx_skb) { - netif_stop_queue(net_dev); - i2400m_get(i2400m); - i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */ - i2400m_tx_prep_header(skb); - result = schedule_work(&i2400m->wake_tx_ws); - WARN_ON(result == 0); - } - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - if (result == 0) { - /* Yes, this happens even if we stopped the - * queue -- blame the queue disciplines that - * queue without looking -- I guess there is a reason - * for that. */ - if (net_ratelimit()) - d_printf(1, dev, "NETTX: device exiting idle, " - "dropping skb %p, queue running %d\n", - skb, netif_queue_stopped(net_dev)); - result = -EBUSY; - } - d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); - return result; -} - - -/* - * Transmit a packet to the base station on behalf of the network stack. - * - * Returns: 0 if ok, < 0 errno code on error. - * - * We need to pull the ethernet header and add the hardware header, - * which is currently set to all zeroes and reserved. 
- */ -static -int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev, - struct sk_buff *skb) -{ - int result; - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n", - i2400m, net_dev, skb); - /* FIXME: check eth hdr, only IPv4 is routed by the device as of now */ - netif_trans_update(net_dev); - i2400m_tx_prep_header(skb); - d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n", - skb, skb->len); - d_dump(4, dev, skb->data, skb->len); - result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); - d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n", - i2400m, net_dev, skb, result); - return result; -} - - -/* - * Transmit a packet to the base station on behalf of the network stack - * - * - * Returns: NETDEV_TX_OK (always, even in case of error) - * - * In case of error, we just drop it. Reasons: - * - * - we add a hw header to each skb, and if the network stack - * retries, we have no way to know if that skb has it or not. - * - * - network protocols have their own drop-recovery mechanisms - * - * - there is not much else we can do - * - * If the device is idle, we need to wake it up; that is an operation - * that will sleep. See i2400m_net_wake_tx() for details. - */ -static -netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb, - struct net_device *net_dev) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct device *dev = i2400m_dev(i2400m); - int result = -1; - - d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); - - if (skb_cow_head(skb, 0)) - goto drop; - - if (i2400m->state == I2400M_SS_IDLE) - result = i2400m_net_wake_tx(i2400m, net_dev, skb); - else - result = i2400m_net_tx(i2400m, net_dev, skb); - if (result < 0) { -drop: - net_dev->stats.tx_dropped++; - } else { - net_dev->stats.tx_packets++; - net_dev->stats.tx_bytes += skb->len; - } - dev_kfree_skb(skb); - d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); - return NETDEV_TX_OK; -} - - -static -void i2400m_tx_timeout(struct net_device *net_dev, unsigned int txqueue) -{ - /* - * We might want to kick the device - * - * There is not much we can do though, as the device requires - * that we send the data aggregated. By the time we receive - * this, there might be data pending to be sent or not... - */ - net_dev->stats.tx_errors++; -} - - -/* - * Create a fake ethernet header - * - * For emulating an ethernet device, every received IP header has to - * be prefixed with an ethernet header. Fake it with the given - * protocol. - */ -static -void i2400m_rx_fake_eth_header(struct net_device *net_dev, - void *_eth_hdr, __be16 protocol) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct ethhdr *eth_hdr = _eth_hdr; - - memcpy(eth_hdr->h_dest, net_dev->dev_addr, sizeof(eth_hdr->h_dest)); - memcpy(eth_hdr->h_source, i2400m->src_mac_addr, - sizeof(eth_hdr->h_source)); - eth_hdr->h_proto = protocol; -} - - -/* - * i2400m_net_rx - pass a network packet to the stack - * - * @i2400m: device instance - * @skb_rx: the skb where the buffer pointed to by @buf is - * @i: 1 if payload is the only one - * @buf: pointer to the buffer containing the data - * @len: buffer's length - * - * This is only used now for the v1.3 firmware. It will be deprecated - * in >= 2.6.31. - * - * Note that due to firmware limitations, we don't have space to add - * an ethernet header, so we need to copy each packet. Firmware - * versions >= v1.4 fix this [see i2400m_net_erx()]. 
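i2400m_rx_fake_eth_header() above writes the synthetic header into ETH_HLEN bytes of headroom that must already exist in front of the IP data. A generic sketch of the same trick expressed with skb_push() (a standalone illustration, not the driver's exact sequence, which writes at skb->data - ETH_HLEN and sets the mac header and protocol separately):

    /* Sketch: prepend a fake ethernet header to an IP-only skb.
     * Assumes at least ETH_HLEN bytes of headroom are available. */
    static void fake_eth_prepend(struct sk_buff *skb, const u8 *dst,
                                 const u8 *src, __be16 proto)
    {
            struct ethhdr *eth = skb_push(skb, ETH_HLEN);

            memcpy(eth->h_dest, dst, ETH_ALEN);
            memcpy(eth->h_source, src, ETH_ALEN);
            eth->h_proto = proto;
            skb_reset_mac_header(skb);
            skb->protocol = proto;
    }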
- *
- * We just clone the skb and set it up so that its skb->data pointer
- * points to "buf" and its length.
- *
- * Note that if the payload is the last (or the only one) in a
- * multi-payload message, we don't clone the SKB but just reuse it.
- *
- * This function is normally run from a thread context. However, we
- * still use netif_rx() instead of netif_receive_skb() as was
- * recommended in the mailing list. Reason is in some stress tests
- * when sending/receiving a lot of data we seem to hit a softlock in
- * the kernel's TCP implementation [around tcp_delay_timer()]. Using
- * netif_rx() took care of the issue.
- *
- * This is, of course, still open to more research on why running
- * with netif_receive_skb() hits this softlock. FIXME.
- *
- * FIXME: currently we don't make any effort at distinguishing if what
- * we got was an IPv4 or IPv6 header, to set up the protocol field
- * correctly.
- */
-void i2400m_net_rx(struct i2400m *i2400m, struct sk_buff *skb_rx,
-		   unsigned i, const void *buf, int buf_len)
-{
-	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-	struct device *dev = i2400m_dev(i2400m);
-	struct sk_buff *skb;
-
-	d_fnstart(2, dev, "(i2400m %p buf %p buf_len %d)\n",
-		  i2400m, buf, buf_len);
-	if (i) {
-		skb = skb_get(skb_rx);
-		d_printf(2, dev, "RX: reusing first payload skb %p\n", skb);
-		skb_pull(skb, buf - (void *) skb->data);
-		skb_trim(skb, (void *) skb_end_pointer(skb) - buf);
-	} else {
-		/* Yes, this is bad -- a lot of overhead -- see
-		 * comments at the top of the file */
-		skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
-		if (skb == NULL) {
-			dev_err(dev, "NETRX: no memory to realloc skb\n");
-			net_dev->stats.rx_dropped++;
-			goto error_skb_realloc;
-		}
-		skb_put_data(skb, buf, buf_len);
-	}
-	i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev,
-				  skb->data - ETH_HLEN,
-				  cpu_to_be16(ETH_P_IP));
-	skb_set_mac_header(skb, -ETH_HLEN);
-	skb->dev = i2400m->wimax_dev.net_dev;
-	skb->protocol = htons(ETH_P_IP);
-	net_dev->stats.rx_packets++;
-	net_dev->stats.rx_bytes += buf_len;
-	d_printf(3, dev, "NETRX: receiving %d bytes to network stack\n",
-		 buf_len);
-	d_dump(4, dev, buf, buf_len);
-	netif_rx_ni(skb);	/* see notes in function header */
-error_skb_realloc:
-	d_fnend(2, dev, "(i2400m %p buf %p buf_len %d) = void\n",
-		i2400m, buf, buf_len);
-}
-
-
-/*
- * i2400m_net_erx - pass a network packet to the stack (extended version)
- *
- * @i2400m: device descriptor
- * @skb: the skb where the packet is - the skb should be set to point
- *     at the IP packet; this function will add ethernet headers if
- *     needed.
- * @cs: packet type
- *
- * This is only used now for firmware >= v1.4. Note it is quite
- * similar to i2400m_net_rx() (used only for v1.3 firmware).
- *
- * This function is normally run from a thread context. However, we
- * still use netif_rx() instead of netif_receive_skb() as was
- * recommended in the mailing list. Reason is in some stress tests
- * when sending/receiving a lot of data we seem to hit a softlock in
- * the kernel's TCP implementation [around tcp_delay_timer()]. Using
- * netif_rx() took care of the issue.
- *
- * This is, of course, still open to more research on why running
- * with netif_receive_skb() hits this softlock. FIXME.
- */ -void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, - enum i2400m_cs cs) -{ - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(2, dev, "(i2400m %p skb %p [%u] cs %d)\n", - i2400m, skb, skb->len, cs); - switch(cs) { - case I2400M_CS_IPV4_0: - case I2400M_CS_IPV4: - i2400m_rx_fake_eth_header(i2400m->wimax_dev.net_dev, - skb->data - ETH_HLEN, - cpu_to_be16(ETH_P_IP)); - skb_set_mac_header(skb, -ETH_HLEN); - skb->dev = i2400m->wimax_dev.net_dev; - skb->protocol = htons(ETH_P_IP); - net_dev->stats.rx_packets++; - net_dev->stats.rx_bytes += skb->len; - break; - default: - dev_err(dev, "ERX: BUG? CS type %u unsupported\n", cs); - goto error; - - } - d_printf(3, dev, "ERX: receiving %d bytes to the network stack\n", - skb->len); - d_dump(4, dev, skb->data, skb->len); - netif_rx_ni(skb); /* see notes in function header */ -error: - d_fnend(2, dev, "(i2400m %p skb %p [%u] cs %d) = void\n", - i2400m, skb, skb->len, cs); -} - -static const struct net_device_ops i2400m_netdev_ops = { - .ndo_open = i2400m_open, - .ndo_stop = i2400m_stop, - .ndo_start_xmit = i2400m_hard_start_xmit, - .ndo_tx_timeout = i2400m_tx_timeout, -}; - -static void i2400m_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *info) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->fw_version, i2400m->fw_name ? : "", - sizeof(info->fw_version)); - if (net_dev->dev.parent) - strlcpy(info->bus_info, dev_name(net_dev->dev.parent), - sizeof(info->bus_info)); -} - -static const struct ethtool_ops i2400m_ethtool_ops = { - .get_drvinfo = i2400m_get_drvinfo, - .get_link = ethtool_op_get_link, -}; - -/** - * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data - * - * Called by alloc_netdev() - */ -void i2400m_netdev_setup(struct net_device *net_dev) -{ - d_fnstart(3, NULL, "(net_dev %p)\n", net_dev); - ether_setup(net_dev); - net_dev->mtu = I2400M_MAX_MTU; - net_dev->min_mtu = 0; - net_dev->max_mtu = I2400M_MAX_MTU; - net_dev->tx_queue_len = I2400M_TX_QLEN; - net_dev->features = - NETIF_F_VLAN_CHALLENGED - | NETIF_F_HIGHDMA; - net_dev->flags = - IFF_NOARP /* i2400m is apure IP device */ - & (~IFF_BROADCAST /* i2400m is P2P */ - & ~IFF_MULTICAST); - net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; - net_dev->netdev_ops = &i2400m_netdev_ops; - net_dev->ethtool_ops = &i2400m_ethtool_ops; - d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); -} -EXPORT_SYMBOL_GPL(i2400m_netdev_setup); - diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c deleted file mode 100644 index 5c79f052cad2..000000000000 --- a/drivers/net/wimax/i2400m/op-rfkill.c +++ /dev/null @@ -1,196 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless WiMAX Connection 2400m - * Implement backend for the WiMAX stack rfkill support - * - * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - * The WiMAX kernel stack integrates into RF-Kill and keeps the - * switches's status. We just need to: - * - * - report changes in the HW RF Kill switch [with - * wimax_rfkill_{sw,hw}_report(), which happens when we detect those - * indications coming through hardware reports]. We also do it on - * initialization to let the stack know the initial HW state. 
- * - * - implement indications from the stack to change the SW RF Kill - * switch (coming from sysfs, the wimax stack or user space). - */ -#include "i2400m.h" -#include <linux/wimax/i2400m.h> -#include <linux/slab.h> - - - -#define D_SUBMODULE rfkill -#include "debug-levels.h" - -/* - * Return true if the i2400m radio is in the requested wimax_rf_state state - * - */ -static -int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state) -{ - if (state == WIMAX_RF_OFF) - return i2400m->state == I2400M_SS_RF_OFF - || i2400m->state == I2400M_SS_RF_SHUTDOWN; - else if (state == WIMAX_RF_ON) - /* state == WIMAX_RF_ON */ - return i2400m->state != I2400M_SS_RF_OFF - && i2400m->state != I2400M_SS_RF_SHUTDOWN; - else { - BUG(); - return -EINVAL; /* shut gcc warnings on certain arches */ - } -} - - -/* - * WiMAX stack operation: implement SW RFKill toggling - * - * @wimax_dev: device descriptor - * @skb: skb where the message has been received; skb->data is - * expected to point to the message payload. - * @genl_info: passed by the generic netlink layer - * - * Generic Netlink will call this function when a message is sent from - * userspace to change the software RF-Kill switch status. - * - * This function will set the device's software RF-Kill switch state to - * match what is requested. - * - * NOTE: the i2400m has a strict state machine; we can only set the - * RF-Kill switch when it is on, the HW RF-Kill is on and the - * device is initialized. So we ignore errors steaming from not - * being in the right state (-EILSEQ). - */ -int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev, - enum wimax_rf_state state) -{ - int result; - struct i2400m *i2400m = wimax_dev_to_i2400m(wimax_dev); - struct device *dev = i2400m_dev(i2400m); - struct sk_buff *ack_skb; - struct { - struct i2400m_l3l4_hdr hdr; - struct i2400m_tlv_rf_operation sw_rf; - } __packed *cmd; - char strerr[32]; - - d_fnstart(4, dev, "(wimax_dev %p state %d)\n", wimax_dev, state); - - result = -ENOMEM; - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); - if (cmd == NULL) - goto error_alloc; - cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL); - cmd->hdr.length = sizeof(cmd->sw_rf); - cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION); - cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION); - cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status)); - switch (state) { - case WIMAX_RF_OFF: /* RFKILL ON, radio OFF */ - cmd->sw_rf.status = cpu_to_le32(2); - break; - case WIMAX_RF_ON: /* RFKILL OFF, radio ON */ - cmd->sw_rf.status = cpu_to_le32(1); - break; - default: - BUG(); - } - - ack_skb = i2400m_msg_to_dev(i2400m, cmd, sizeof(*cmd)); - result = PTR_ERR(ack_skb); - if (IS_ERR(ack_skb)) { - dev_err(dev, "Failed to issue 'RF Control' command: %d\n", - result); - goto error_msg_to_dev; - } - result = i2400m_msg_check_status(wimax_msg_data(ack_skb), - strerr, sizeof(strerr)); - if (result < 0) { - dev_err(dev, "'RF Control' (0x%04x) command failed: %d - %s\n", - I2400M_MT_CMD_RF_CONTROL, result, strerr); - goto error_cmd; - } - - /* Now we wait for the state to change to RADIO_OFF or RADIO_ON */ - result = wait_event_timeout( - i2400m->state_wq, i2400m_radio_is(i2400m, state), - 5 * HZ); - if (result == 0) - result = -ETIMEDOUT; - if (result < 0) - dev_err(dev, "Error waiting for device to toggle RF state: " - "%d\n", result); - result = 0; -error_cmd: - kfree_skb(ack_skb); -error_msg_to_dev: -error_alloc: - d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n", - wimax_dev, state, result); - kfree(cmd); - return result; 
-}
-
-
-/*
- * Inform the WiMAX stack of changes in the RF Kill switches reported
- * by the device
- *
- * @i2400m: device descriptor
- * @rfss: TLV for RF Switches status; already validated
- *
- * NOTE: the reports on RF switch status cannot be trusted
- *       or used until the device is in a state of RADIO_OFF
- *       or greater.
- */
-void i2400m_report_tlv_rf_switches_status(
-	struct i2400m *i2400m,
-	const struct i2400m_tlv_rf_switches_status *rfss)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	enum i2400m_rf_switch_status hw, sw;
-	enum wimax_st wimax_state;
-
-	sw = le32_to_cpu(rfss->sw_rf_switch);
-	hw = le32_to_cpu(rfss->hw_rf_switch);
-
-	d_fnstart(3, dev, "(i2400m %p rfss %p [hw %u sw %u])\n",
-		  i2400m, rfss, hw, sw);
-	/* We only process RF switch events when the device has been
-	 * fully initialized */
-	wimax_state = wimax_state_get(&i2400m->wimax_dev);
-	if (wimax_state < WIMAX_ST_RADIO_OFF) {
-		d_printf(3, dev, "ignoring RF switches report, state %u\n",
-			 wimax_state);
-		goto out;
-	}
-	switch (sw) {
-	case I2400M_RF_SWITCH_ON:	/* RF Kill disabled (radio on) */
-		wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_ON);
-		break;
-	case I2400M_RF_SWITCH_OFF:	/* RF Kill enabled (radio off) */
-		wimax_report_rfkill_sw(&i2400m->wimax_dev, WIMAX_RF_OFF);
-		break;
-	default:
-		dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw);
-	}
-
-	switch (hw) {
-	case I2400M_RF_SWITCH_ON:	/* RF Kill disabled (radio on) */
-		wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_ON);
-		break;
-	case I2400M_RF_SWITCH_OFF:	/* RF Kill enabled (radio off) */
-		wimax_report_rfkill_hw(&i2400m->wimax_dev, WIMAX_RF_OFF);
-		break;
-	default:
-		dev_err(dev, "HW BUG? Unknown RF HW state 0x%x\n", hw);
-	}
-out:
-	d_fnend(3, dev, "(i2400m %p rfss %p [hw %u sw %u]) = void\n",
-		i2400m, rfss, hw, sw);
-}
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c deleted file mode 100644 index c9fb619a9e01..000000000000 --- a/drivers/net/wimax/i2400m/rx.c +++ /dev/null @@ -1,1395 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Handle incoming traffic and deliver it to the control or data planes
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- *  - Initial implementation
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- *  - Use skb_clone(), break up processing in chunks
- *  - Split transport/device specific
- *  - Make buffer size dynamic to exert less memory pressure
- *  - RX reorder support
- *
- * This handles the RX path.
- *
- * We receive an RX message from the bus-specific driver, which
- * contains one or more payloads that have potentially different
- * destinations (data or control paths).
- *
- * So we just take that payload from the transport specific code in
- * the form of an skb, break it up in chunks (a cloned skb each in the
- * case of network packets) and pass it to netdev or to the
- * command/ack handler (and from there to the WiMAX stack).
- *
- * PROTOCOL FORMAT
- *
- * The format of the buffer is:
- *
- * HEADER                      (struct i2400m_msg_hdr)
- * PAYLOAD DESCRIPTOR 0        (struct i2400m_pld)
- * PAYLOAD DESCRIPTOR 1
- * ...
- * PAYLOAD DESCRIPTOR N
- * PAYLOAD 0                   (raw bytes)
- * PAYLOAD 1
- * ...
- * PAYLOAD N
- *
- * See tx.c for a deeper description on alignment requirements and
- * other fun facts about it.
- *
- * DATA PACKETS
- *
- * In firmwares <= v1.3, data packets have no header for RX, but they
- * do for TX (currently unused).
- *
- * In firmware >= 1.4, RX packets have an extended header (16
- * bytes). This header conveys information for management of host
- * reordering of packets (the device offloads storage of the packets
- * for reordering to the host). Read below for more information.
- *
- * The header is used as dummy space to emulate an ethernet header and
- * thus be able to act as an ethernet device without having to reallocate.
- *
- * DATA RX REORDERING
- *
- * Starting in firmware v1.4, the device can deliver packets with
- * special reordering information; this allows it to more effectively
- * do packet management when some frames were lost in the radio
- * traffic.
- *
- * Thus, for RX packets that come out of order, the device gives the
- * driver enough information to queue them properly and then at some
- * point, the signal to deliver the whole (or part) of the queued
- * packets to the networking stack. There are 16 such queues.
- *
- * This only happens when a packet comes in with the "need reorder"
- * flag set in the RX header. When that bit is set, the following
- * operations might be indicated:
- *
- * - reset queue: send all queued packets to the OS
- *
- * - queue: queue a packet
- *
- * - update ws: update the queue's window start and deliver queued
- *   packets that meet the criteria
- *
- * - queue & update ws: queue a packet, update the window start and
- *   deliver queued packets that meet the criteria
- *
- * (delivery criteria: the packet's [normalized] sequence number is
- * lower than the new [normalized] window start).
- *
- * See the i2400m_roq_*() functions for details.
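A minimal, self-contained sketch of the delivery criterion described above (the mod-2048 normalization is taken from the comment; the harness and names are illustrative, not driver code):

#include <assert.h>

/* nsn = (sn - ws) mod 2048, forced positive as the driver does */
static unsigned roq_nsn(unsigned ws, unsigned sn)
{
	int r = ((int)sn - (int)ws) % 2048;
	if (r < 0)		/* C's % can be negative; wrap into 0..2047 */
		r += 2048;
	return r;
}

int main(void)
{
	unsigned ws = 2040;	/* current window start, near the wrap */
	unsigned new_ws = 5;	/* updated window start, past the wrap */
	unsigned new_nws = roq_nsn(ws, new_ws);	/* == 13 */

	assert(roq_nsn(ws, 2045) == 5);		/* wrapped sn normalizes low */
	assert(roq_nsn(ws, 2045) < new_nws);	/* below new start: deliver */
	assert(roq_nsn(ws, 10) == 18);		/* beyond new start: keep queued */
	return 0;
}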
- * - * ROADMAP - * - * i2400m_rx - * i2400m_rx_msg_hdr_check - * i2400m_rx_pl_descr_check - * i2400m_rx_payload - * i2400m_net_rx - * i2400m_rx_edata - * i2400m_net_erx - * i2400m_roq_reset - * i2400m_net_erx - * i2400m_roq_queue - * __i2400m_roq_queue - * i2400m_roq_update_ws - * __i2400m_roq_update_ws - * i2400m_net_erx - * i2400m_roq_queue_update_ws - * __i2400m_roq_queue - * __i2400m_roq_update_ws - * i2400m_net_erx - * i2400m_rx_ctl - * i2400m_msg_size_check - * i2400m_report_hook_work [in a workqueue] - * i2400m_report_hook - * wimax_msg_to_user - * i2400m_rx_ctl_ack - * wimax_msg_to_user_alloc - * i2400m_rx_trace - * i2400m_msg_size_check - * wimax_msg - */ -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/if_arp.h> -#include <linux/netdevice.h> -#include <linux/workqueue.h> -#include <linux/export.h> -#include <linux/moduleparam.h> -#include "i2400m.h" - - -#define D_SUBMODULE rx -#include "debug-levels.h" - -static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */ -module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644); -MODULE_PARM_DESC(rx_reorder_disabled, - "If true, RX reordering will be disabled."); - -struct i2400m_report_hook_args { - struct sk_buff *skb_rx; - const struct i2400m_l3l4_hdr *l3l4_hdr; - size_t size; - struct list_head list_node; -}; - - -/* - * Execute i2400m_report_hook in a workqueue - * - * Goes over the list of queued reports in i2400m->rx_reports and - * processes them. - * - * NOTE: refcounts on i2400m are not needed because we flush the - * workqueue this runs on (i2400m->work_queue) before destroying - * i2400m. - */ -void i2400m_report_hook_work(struct work_struct *ws) -{ - struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws); - struct device *dev = i2400m_dev(i2400m); - struct i2400m_report_hook_args *args, *args_next; - LIST_HEAD(list); - unsigned long flags; - - while (1) { - spin_lock_irqsave(&i2400m->rx_lock, flags); - list_splice_init(&i2400m->rx_reports, &list); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - if (list_empty(&list)) - break; - else - d_printf(1, dev, "processing queued reports\n"); - list_for_each_entry_safe(args, args_next, &list, list_node) { - d_printf(2, dev, "processing queued report %p\n", args); - i2400m_report_hook(i2400m, args->l3l4_hdr, args->size); - kfree_skb(args->skb_rx); - list_del(&args->list_node); - kfree(args); - } - } -} - - -/* - * Flush the list of queued reports - */ -static -void i2400m_report_hook_flush(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - struct i2400m_report_hook_args *args, *args_next; - LIST_HEAD(list); - unsigned long flags; - - d_printf(1, dev, "flushing queued reports\n"); - spin_lock_irqsave(&i2400m->rx_lock, flags); - list_splice_init(&i2400m->rx_reports, &list); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - list_for_each_entry_safe(args, args_next, &list, list_node) { - d_printf(2, dev, "flushing queued report %p\n", args); - kfree_skb(args->skb_rx); - list_del(&args->list_node); - kfree(args); - } -} - - -/* - * Queue a report for later processing - * - * @i2400m: device descriptor - * @skb_rx: skb that contains the payload (for reference counting) - * @l3l4_hdr: pointer to the control - * @size: size of the message - */ -static -void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx, - const void *l3l4_hdr, size_t size) -{ - struct device *dev = i2400m_dev(i2400m); - unsigned long flags; - struct i2400m_report_hook_args *args; - - args = 
kzalloc(sizeof(*args), GFP_NOIO);
-	if (args) {
-		args->skb_rx = skb_get(skb_rx);
-		args->l3l4_hdr = l3l4_hdr;
-		args->size = size;
-		spin_lock_irqsave(&i2400m->rx_lock, flags);
-		list_add_tail(&args->list_node, &i2400m->rx_reports);
-		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-		d_printf(2, dev, "queued report %p\n", args);
-		rmb();		/* see i2400m->ready's documentation  */
-		if (likely(i2400m->ready))	/* only send if up */
-			queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
-	} else {
-		if (printk_ratelimit())
-			dev_err(dev, "%s:%u: Can't allocate %zu B\n",
-				__func__, __LINE__, sizeof(*args));
-	}
-}
-
-
-/*
- * Process an ack to a command
- *
- * @i2400m: device descriptor
- * @payload: pointer to message
- * @size: size of the message
- *
- * Pass the acknowledgment (in an skb) to the thread that is waiting
- * for it in i2400m->msg_completion.
- *
- * We need to coordinate properly with the thread waiting for the
- * ack. Check if it is waiting or if it is gone. We release the
- * spinlock to avoid allocating on atomic contexts (yeah, could use
- * GFP_ATOMIC, but this is not so speed critical).
- */
-static
-void i2400m_rx_ctl_ack(struct i2400m *i2400m,
-		       const void *payload, size_t size)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
-	unsigned long flags;
-	struct sk_buff *ack_skb;
-
-	/* Anyone waiting for an answer? */
-	spin_lock_irqsave(&i2400m->rx_lock, flags);
-	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
-		dev_err(dev, "Huh? reply to command with no waiters\n");
-		goto error_no_waiter;
-	}
-	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-
-	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);
-
-	/* Check waiter didn't time out waiting for the answer... */
-	spin_lock_irqsave(&i2400m->rx_lock, flags);
-	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
-		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
-		goto error_waiter_cancelled;
-	}
-	if (IS_ERR(ack_skb))
-		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
-	i2400m->ack_skb = ack_skb;
-	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-	complete(&i2400m->msg_completion);
-	return;
-
-error_waiter_cancelled:
-	if (!IS_ERR(ack_skb))
-		kfree_skb(ack_skb);
-error_no_waiter:
-	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-}
-
-
-/*
- * Receive and process a control payload
- *
- * @i2400m: device descriptor
- * @skb_rx: skb that contains the payload (for reference counting)
- * @payload: pointer to message
- * @size: size of the message
- *
- * There are two types of control RX messages: reports (asynchronous,
- * like your everyday interrupts) and 'acks' (responses to a command,
- * get or set request).
- *
- * If it is a report, we run hooks on it (to extract information for
- * things we need to do in the driver) and then pass it over to the
- * WiMAX stack to send it to user space.
- *
- * NOTE: report processing is done in a workqueue specific to the
- *     generic driver, to avoid deadlocks in the system.
- *
- * If it is not a report, it is an ack to a previously executed
- * command, set or get, so wake up whoever is waiting for it from
- * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
- *
- * Note that the sizes we pass to other functions from here are the
- * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
- * verified in _msg_size_check() that they are congruent.
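The waiter/ack handshake above is easy to get wrong, so here is a userspace sketch of the same pattern (pthreads stand in for the kernel spinlock and completion, and a static marker models the ERR_PTR(-EINPROGRESS) sentinel; all names are made up for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static char waiting;		/* stands in for ERR_PTR(-EINPROGRESS) */
static void *ack_slot;		/* NULL, &waiting, or the ack buffer */

static void *rx_thread(void *ack)
{
	pthread_mutex_lock(&lock);
	if (ack_slot == &waiting) {	/* only fill the slot if someone waits */
		ack_slot = ack;
		pthread_cond_signal(&done);
	}				/* else: waiter timed out, drop the ack */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	static char the_ack;		/* fake ack payload */
	pthread_t rx;

	pthread_mutex_lock(&lock);
	ack_slot = &waiting;		/* announce that we are waiting */
	pthread_create(&rx, NULL, rx_thread, &the_ack);
	while (ack_slot == &waiting)	/* lock dropped while waiting */
		pthread_cond_wait(&done, &lock);
	printf("got ack: %s\n", ack_slot == &the_ack ? "yes" : "no");
	ack_slot = NULL;
	pthread_mutex_unlock(&lock);
	pthread_join(rx, NULL);
	return 0;
}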
- *
- * For reports: We can't clone the original skb where the data is
- * because we need to send this up via netlink; netlink has to add
- * headers and we can't overwrite what's preceding the payload...as
- * it is another message. So we just dup them.
- */
-static
-void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
-		   const void *payload, size_t size)
-{
-	int result;
-	struct device *dev = i2400m_dev(i2400m);
-	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
-	unsigned msg_type;
-
-	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
-	if (result < 0) {
-		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
-			result);
-		goto error_check;
-	}
-	msg_type = le16_to_cpu(l3l4_hdr->type);
-	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
-		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
-		 msg_type, size);
-	d_dump(2, dev, l3l4_hdr, size);
-	if (msg_type & I2400M_MT_REPORT_MASK) {
-		/*
-		 * Process each report
-		 *
-		 * - has to be run serialized as well
-		 *
-		 * - the handling might force the execution of
-		 *   commands. That might cause reentrancy issues with
-		 *   bus-specific subdrivers and workqueues, so we
-		 *   run it in a separate workqueue.
-		 *
-		 * - when the driver is not yet ready to handle them,
-		 *   they are queued and at some point the queue is
-		 *   restarted [NOTE: we can't queue SKBs directly, as
-		 *   this might be a piece of a SKB, not the whole
-		 *   thing, and this is cheaper than cloning the
-		 *   SKB].
-		 *
-		 * Note we don't do refcounting for the device
-		 * structure; this is because before destroying
-		 * 'i2400m', we make sure to flush the
-		 * i2400m->work_queue, so there are no issues.
-		 */
-		i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
-		if (unlikely(i2400m->trace_msg_from_user))
-			wimax_msg(&i2400m->wimax_dev, "echo",
-				  l3l4_hdr, size, GFP_KERNEL);
-		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
-				   GFP_KERNEL);
-		if (result < 0)
-			dev_err(dev, "error sending report to userspace: %d\n",
-				result);
-	} else		/* an ack to a CMD, GET or SET */
-		i2400m_rx_ctl_ack(i2400m, payload, size);
-error_check:
-	return;
-}
-
-
-/*
- * Receive and send up a trace
- *
- * @i2400m: device descriptor
- * @skb_rx: skb that contains the trace (for reference counting)
- * @payload: pointer to trace message inside the skb
- * @size: size of the message
- *
- * The i2400m might produce trace information (diagnostics) and we
- * send them through a different kernel-to-user pipe (to avoid
- * clogging it).
- *
- * As in i2400m_rx_ctl(), we can't clone the original skb where the
- * data is because we need to send this up via netlink; netlink has to
- * add headers and we can't overwrite what's preceding the
- * payload...as it is another message. So we just dup them.
- */
-static
-void i2400m_rx_trace(struct i2400m *i2400m,
-		     const void *payload, size_t size)
-{
-	int result;
-	struct device *dev = i2400m_dev(i2400m);
-	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
-	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
-	unsigned msg_type;
-
-	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
-	if (result < 0) {
-		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
-			result);
-		goto error_check;
-	}
-	msg_type = le16_to_cpu(l3l4_hdr->type);
-	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
-		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
-		 msg_type, size);
-	d_dump(2, dev, l3l4_hdr, size);
-	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
-	if (result < 0)
-		dev_err(dev, "error sending trace to userspace: %d\n",
-			result);
-error_check:
-	return;
-}
-
-
-/*
- * Reorder queue data stored on skb->cb while the skb is queued in the
- * reorder queues.
- */
-struct i2400m_roq_data {
-	unsigned sn;		/* Sequence number for the skb */
-	enum i2400m_cs cs;	/* packet type for the skb */
-};
-
-
-/*
- * ReOrder Queue
- *
- * @ws: Window Start; sequence number where the current window start
- *     is for this queue
- * @queue: the skb queue itself
- * @log: circular ring buffer used to log information about the
- *     reorder process in this queue that can be displayed in case of
- *     error to help diagnose it.
- *
- * This is the head for a list of skbs. The skb->cb member of each
- * skb, when queued here, contains a 'struct i2400m_roq_data' where we
- * store the sequence number (sn) and the cs (packet type) coming from
- * the RX payload header from the device.
- */
-struct i2400m_roq
-{
-	unsigned ws;
-	struct sk_buff_head queue;
-	struct i2400m_roq_log *log;
-};
-
-
-static
-void __i2400m_roq_init(struct i2400m_roq *roq)
-{
-	roq->ws = 0;
-	skb_queue_head_init(&roq->queue);
-}
-
-
-static
-unsigned __i2400m_roq_index(struct i2400m *i2400m, struct i2400m_roq *roq)
-{
-	return ((unsigned long) roq - (unsigned long) i2400m->rx_roq)
-		/ sizeof(*roq);
-}
-
-
-/*
- * Normalize a sequence number based on the queue's window start
- *
- * nsn = (sn - ws) % 2048
- *
- * Note that if @sn < @roq->ws, we still need a positive number; %'s
- * sign is implementation specific, so we normalize it by adding 2048
- * to bring it to be positive.
- */
-static
-unsigned __i2400m_roq_nsn(struct i2400m_roq *roq, unsigned sn)
-{
-	int r;
-	r = ((int) sn - (int) roq->ws) % 2048;
-	if (r < 0)
-		r += 2048;
-	return r;
-}
-
-
-/*
- * Circular buffer to keep the last N reorder operations
- *
- * In case something fails, dump them to try to come up with what
- * happened.
- */
-enum {
-	I2400M_ROQ_LOG_LENGTH = 32,
-};
-
-struct i2400m_roq_log {
-	struct i2400m_roq_log_entry {
-		enum i2400m_ro_type type;
-		unsigned ws, count, sn, nsn, new_ws;
-	} entry[I2400M_ROQ_LOG_LENGTH];
-	unsigned in, out;
-};
-
-
-/* Print a log entry */
-static
-void i2400m_roq_log_entry_print(struct i2400m *i2400m, unsigned index,
-				unsigned e_index,
-				struct i2400m_roq_log_entry *e)
-{
-	struct device *dev = i2400m_dev(i2400m);
-
-	switch(e->type) {
-	case I2400M_RO_TYPE_RESET:
-		dev_err(dev, "q#%d reset ws %u cnt %u sn %u/%u"
-			" - new nws %u\n",
-			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
-		break;
-	case I2400M_RO_TYPE_PACKET:
-		dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n",
-			index, e->ws, e->count, e->sn, e->nsn);
-		break;
-	case I2400M_RO_TYPE_WS:
-		dev_err(dev, "q#%d update_ws ws %u cnt %u sn %u/%u"
-			" - new nws %u\n",
-			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
-		break;
-	case I2400M_RO_TYPE_PACKET_WS:
-		dev_err(dev, "q#%d queue_update_ws ws %u cnt %u sn %u/%u"
-			" - new nws %u\n",
-			index, e->ws, e->count, e->sn, e->nsn, e->new_ws);
-		break;
-	default:
-		dev_err(dev, "q#%d BUG? entry %u - unknown type %u\n",
-			index, e_index, e->type);
-		break;
-	}
-}
-
-
-static
-void i2400m_roq_log_add(struct i2400m *i2400m,
-			struct i2400m_roq *roq, enum i2400m_ro_type type,
-			unsigned ws, unsigned count, unsigned sn,
-			unsigned nsn, unsigned new_ws)
-{
-	struct i2400m_roq_log_entry *e;
-	unsigned cnt_idx;
-	int index = __i2400m_roq_index(i2400m, roq);
-
-	/* if we run out of space, we eat from the end */
-	if (roq->log->in - roq->log->out == I2400M_ROQ_LOG_LENGTH)
-		roq->log->out++;
-	cnt_idx = roq->log->in++ % I2400M_ROQ_LOG_LENGTH;
-	e = &roq->log->entry[cnt_idx];
-
-	e->type = type;
-	e->ws = ws;
-	e->count = count;
-	e->sn = sn;
-	e->nsn = nsn;
-	e->new_ws = new_ws;
-
-	if (d_test(1))
-		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
-}
-
-
-/* Dump all the entries in the FIFO and reinitialize it */
-static
-void i2400m_roq_log_dump(struct i2400m *i2400m, struct i2400m_roq *roq)
-{
-	unsigned cnt, cnt_idx;
-	struct i2400m_roq_log_entry *e;
-	int index = __i2400m_roq_index(i2400m, roq);
-
-	BUG_ON(roq->log->out > roq->log->in);
-	for (cnt = roq->log->out; cnt < roq->log->in; cnt++) {
-		cnt_idx = cnt % I2400M_ROQ_LOG_LENGTH;
-		e = &roq->log->entry[cnt_idx];
-		i2400m_roq_log_entry_print(i2400m, index, cnt_idx, e);
-		memset(e, 0, sizeof(*e));
-	}
-	roq->log->in = roq->log->out = 0;
-}
-
-
-/*
- * Backbone for the queuing of an skb (by normalized sequence number)
- *
- * @i2400m: device descriptor
- * @roq: reorder queue where to add
- * @skb: the skb to add
- * @sn: the sequence number of the skb
- * @nsn: the normalized sequence number of the skb (pre-computed by the
- *     caller from the @sn and @roq->ws).
- *
- * We try first a couple of quick cases:
- *
- * - the queue is empty
- * - the skb would be appended to the queue
- *
- * These will be the most common operations.
- *
- * If these fail, then we have to do a sorted insertion in the queue,
- * which is the slowest path.
- *
- * We don't have to acquire a reference count as we are going to own it.
- */
-static
-void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
-			struct sk_buff *skb, unsigned sn, unsigned nsn)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	struct sk_buff *skb_itr;
-	struct i2400m_roq_data *roq_data_itr, *roq_data;
-	unsigned nsn_itr;
-
-	d_fnstart(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %u)\n",
-		  i2400m, roq, skb, sn, nsn);
-
-	roq_data = (struct i2400m_roq_data *) &skb->cb;
-	BUILD_BUG_ON(sizeof(*roq_data) > sizeof(skb->cb));
-	roq_data->sn = sn;
-	d_printf(3, dev, "ERX: roq %p [ws %u] nsn %d sn %u\n",
-		 roq, roq->ws, nsn, roq_data->sn);
-
-	/* Queues will be empty on not-so-bad environments, so try
-	 * that first */
-	if (skb_queue_empty(&roq->queue)) {
-		d_printf(2, dev, "ERX: roq %p - first one\n", roq);
-		__skb_queue_head(&roq->queue, skb);
-		goto out;
-	}
-	/* Now try append, as most of the operations will be that */
-	skb_itr = skb_peek_tail(&roq->queue);
-	roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
-	nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
-	/* NSN bounds assumed correct (checked when it was queued) */
-	if (nsn >= nsn_itr) {
-		d_printf(2, dev, "ERX: roq %p - appended after %p (nsn %d sn %u)\n",
-			 roq, skb_itr, nsn_itr, roq_data_itr->sn);
-		__skb_queue_tail(&roq->queue, skb);
-		goto out;
-	}
-	/* None of the fast-path options worked. Iterate to find the
-	 * right spot where to insert the packet; we know the queue is
-	 * not empty, so we are not the first ones; we also know we
-	 * are not going to be the last ones. The list is sorted, so
-	 * we have to insert before the first guy with an nsn_itr
-	 * greater than our nsn. */
-	skb_queue_walk(&roq->queue, skb_itr) {
-		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
-		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
-		/* NSN bounds assumed correct (checked when it was queued) */
-		if (nsn_itr > nsn) {
-			d_printf(2, dev, "ERX: roq %p - queued before %p "
-				 "(nsn %d sn %u)\n", roq, skb_itr, nsn_itr,
-				 roq_data_itr->sn);
-			__skb_queue_before(&roq->queue, skb_itr, skb);
-			goto out;
-		}
-	}
-	/* If we get here, that is VERY bad -- print info to help
-	 * diagnose and crash it */
-	dev_err(dev, "SW BUG? failed to insert packet\n");
-	dev_err(dev, "ERX: roq %p [ws %u] skb %p nsn %d sn %u\n",
-		roq, roq->ws, skb, nsn, roq_data->sn);
-	skb_queue_walk(&roq->queue, skb_itr) {
-		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
-		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
-		/* NSN bounds assumed correct (checked when it was queued) */
-		dev_err(dev, "ERX: roq %p skb_itr %p nsn %d sn %u\n",
-			roq, skb_itr, nsn_itr, roq_data_itr->sn);
-	}
-	BUG();
-out:
-	d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
-		i2400m, roq, skb, sn, nsn);
-}
-
-
-/*
- * Backbone for the update window start operation
- *
- * @i2400m: device descriptor
- * @roq: Reorder queue
- * @sn: New sequence number
- *
- * Updates the window start of a queue; when doing so, it must deliver
- * to the networking stack all the queued skb's whose normalized
- * sequence number is lower than the new normalized window start.
- */
-static
-unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
-				unsigned sn)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	struct sk_buff *skb_itr, *tmp_itr;
-	struct i2400m_roq_data *roq_data_itr;
-	unsigned new_nws, nsn_itr;
-
-	new_nws = __i2400m_roq_nsn(roq, sn);
-	/*
-	 * For type 2 (update_window_start) rx messages, there is no
-	 * need to check if the normalized sequence number is greater
-	 * than 1023. Simply insert and deliver all packets to the
-	 * host up to the window start.
-	 */
-	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
-		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
-		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
-		/* NSN bounds assumed correct (checked when it was queued) */
-		if (nsn_itr < new_nws) {
-			d_printf(2, dev, "ERX: roq %p - release skb %p "
-				 "(nsn %u/%u new nws %u)\n",
-				 roq, skb_itr, nsn_itr, roq_data_itr->sn,
-				 new_nws);
-			__skb_unlink(skb_itr, &roq->queue);
-			i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
-		}
-		else
-			break;	/* rest of packets all nsn_itr > nws */
-	}
-	roq->ws = sn;
-	return new_nws;
-}
-
-
-/*
- * Reset a queue
- *
- * @i2400m: device descriptor
- * @cin: Queue Index
- *
- * Deliver all the packets and reset the window-start to zero. Name is
- * kind of misleading.
- */
-static
-void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	struct sk_buff *skb_itr, *tmp_itr;
-	struct i2400m_roq_data *roq_data_itr;
-
-	d_fnstart(2, dev, "(i2400m %p roq %p)\n", i2400m, roq);
-	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_RESET,
-			   roq->ws, skb_queue_len(&roq->queue),
-			   ~0, ~0, 0);
-	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
-		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
-		d_printf(2, dev, "ERX: roq %p - release skb %p (sn %u)\n",
-			 roq, skb_itr, roq_data_itr->sn);
-		__skb_unlink(skb_itr, &roq->queue);
-		i2400m_net_erx(i2400m, skb_itr, roq_data_itr->cs);
-	}
-	roq->ws = 0;
-	d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
-}
-
-
-/*
- * Queue a packet
- *
- * @i2400m: device descriptor
- * @cin: Queue Index
- * @skb: containing the packet data
- * @fbn: First block number of the packet in @skb
- * @lbn: Last block number of the packet in @skb
- *
- * The hardware is asking the driver to queue a packet for later
- * delivery to the networking stack.
- */
-static
-void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
-		      struct sk_buff * skb, unsigned lbn)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	unsigned nsn, len;
-
-	d_fnstart(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
-		  i2400m, roq, skb, lbn);
-	len = skb_queue_len(&roq->queue);
-	nsn = __i2400m_roq_nsn(roq, lbn);
-	if (unlikely(nsn >= 1024)) {
-		dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
-			nsn, lbn, roq->ws);
-		i2400m_roq_log_dump(i2400m, roq);
-		i2400m_reset(i2400m, I2400M_RT_WARM);
-	} else {
-		__i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
-		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
-				   roq->ws, len, lbn, nsn, ~0);
-	}
-	d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
-		i2400m, roq, skb, lbn);
-}
-
-
-/*
- * Update the window start in a reorder queue and deliver all skbs
- * with a lower window start
- *
- * @i2400m: device descriptor
- * @roq: Reorder queue
- * @sn: New sequence number
- */
-static
-void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
-			  unsigned sn)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	unsigned old_ws, nsn, len;
-
-	d_fnstart(2, dev, "(i2400m %p roq %p sn %u)\n", i2400m, roq, sn);
-	old_ws = roq->ws;
-	len = skb_queue_len(&roq->queue);
-	nsn = __i2400m_roq_update_ws(i2400m, roq, sn);
-	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
-			   old_ws, len, sn, nsn, roq->ws);
-	d_fnend(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
-}
-
-
-/*
- * Queue a packet and update the window start
- *
- * @i2400m: device descriptor
- * @cin: Queue Index
- * @skb: containing the packet data
- * @fbn: First block number of the packet in @skb
- * @sn: Last block number of the packet in @skb
- *
- * Note that unlike i2400m_roq_update_ws(), which sets the new window
- * start to @sn, in here we'll set it to @sn + 1.
- */
-static
-void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
-				struct sk_buff * skb, unsigned sn)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	unsigned nsn, old_ws, len;
-
-	d_fnstart(2, dev, "(i2400m %p roq %p skb %p sn %u)\n",
-		  i2400m, roq, skb, sn);
-	len = skb_queue_len(&roq->queue);
-	nsn = __i2400m_roq_nsn(roq, sn);
-	/*
-	 * For type 3 (queue_update_window_start) rx messages, there is
-	 * no need to check if the normalized sequence number is greater
-	 * than 1023. Simply insert and deliver all packets to the host
-	 * up to the window start.
- */
-	old_ws = roq->ws;
-	/* If the queue is empty, don't bother as we'd queue
-	 * it and immediately unqueue it -- just deliver it.
-	 */
-	if (len == 0) {
-		struct i2400m_roq_data *roq_data;
-		roq_data = (struct i2400m_roq_data *) &skb->cb;
-		i2400m_net_erx(i2400m, skb, roq_data->cs);
-	} else
-		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-
-	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
-			   old_ws, len, sn, nsn, roq->ws);
-
-	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
-		i2400m, roq, skb, sn);
-}
-
-
-/*
- * This routine destroys the memory allocated for rx_roq, when no
- * other thread is accessing it. Access to rx_roq is refcounted by
- * rx_roq_refcount, hence memory allocated must be destroyed when
- * rx_roq_refcount becomes zero. This routine gets executed when
- * rx_roq_refcount becomes zero.
- */
-static void i2400m_rx_roq_destroy(struct kref *ref)
-{
-	unsigned itr;
-	struct i2400m *i2400m
-		= container_of(ref, struct i2400m, rx_roq_refcount);
-	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
-		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
-	kfree(i2400m->rx_roq[0].log);
-	kfree(i2400m->rx_roq);
-	i2400m->rx_roq = NULL;
-}
-
-/*
- * Receive and send up an extended data packet
- *
- * @i2400m: device descriptor
- * @skb_rx: skb that contains the extended data packet
- * @single_last: 1 if the payload is the only one or the last one of
- *     the skb.
- * @payload: pointer to the packet's data inside the skb
- * @size: size of the payload
- *
- * Starting in v1.4 of the i2400m's firmware, the device can send data
- * packets to the host in an extended format; this includes a 16
- * byte header (struct i2400m_pl_edata_hdr). Using this header's space
- * we can fake ethernet headers for ethernet device emulation without
- * having to copy packets around.
- *
- * This function handles said path.
- *
- *
- * Receive and send up an extended data packet that requires no reordering
- *
- * @i2400m: device descriptor
- * @skb_rx: skb that contains the extended data packet
- * @single_last: 1 if the payload is the only one or the last one of
- *     the skb.
- * @payload: pointer to the packet's data (past the actual extended
- *     data payload header).
- * @size: size of the payload
- *
- * Pass over to the networking stack a data packet that might have
- * reordering requirements.
- *
- * This needs to decide whether the skb in which the packet is
- * contained can be reused or if it needs to be cloned. Then it has to
- * be trimmed in the edges so that the beginning is the space for the
- * eth header and then pass it to i2400m_net_erx() for the stack
- *
- * Assumes the caller has verified the sanity of the payload (size,
- * etc) already.
- */
-static
-void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
-		     unsigned single_last, const void *payload, size_t size)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	const struct i2400m_pl_edata_hdr *hdr = payload;
-	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-	struct sk_buff *skb;
-	enum i2400m_cs cs;
-	u32 reorder;
-	unsigned ro_needed, ro_type, ro_cin, ro_sn;
-	struct i2400m_roq *roq;
-	struct i2400m_roq_data *roq_data;
-	unsigned long flags;
-
-	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
-
-	d_fnstart(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
-		  "size %zu)\n", i2400m, skb_rx, single_last, payload, size);
-	if (size < sizeof(*hdr)) {
-		dev_err(dev, "ERX: HW BUG? 
message with short header (%zu " - "vs %zu bytes expected)\n", size, sizeof(*hdr)); - goto error; - } - - if (single_last) { - skb = skb_get(skb_rx); - d_printf(3, dev, "ERX: skb %p reusing\n", skb); - } else { - skb = skb_clone(skb_rx, GFP_KERNEL); - if (skb == NULL) { - dev_err(dev, "ERX: no memory to clone skb\n"); - net_dev->stats.rx_dropped++; - goto error_skb_clone; - } - d_printf(3, dev, "ERX: skb %p cloned from %p\n", skb, skb_rx); - } - /* now we have to pull and trim so that the skb points to the - * beginning of the IP packet; the netdev part will add the - * ethernet header as needed - we know there is enough space - * because we checked in i2400m_rx_edata(). */ - skb_pull(skb, payload + sizeof(*hdr) - (void *) skb->data); - skb_trim(skb, (void *) skb_end_pointer(skb) - payload - sizeof(*hdr)); - - reorder = le32_to_cpu(hdr->reorder); - ro_needed = reorder & I2400M_RO_NEEDED; - cs = hdr->cs; - if (ro_needed) { - ro_type = (reorder >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE; - ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN; - ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; - - spin_lock_irqsave(&i2400m->rx_lock, flags); - if (i2400m->rx_roq == NULL) { - kfree_skb(skb); /* rx_roq is already destroyed */ - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - goto error; - } - roq = &i2400m->rx_roq[ro_cin]; - kref_get(&i2400m->rx_roq_refcount); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - - roq_data = (struct i2400m_roq_data *) &skb->cb; - roq_data->sn = ro_sn; - roq_data->cs = cs; - d_printf(2, dev, "ERX: reorder needed: " - "type %u cin %u [ws %u] sn %u/%u len %zuB\n", - ro_type, ro_cin, roq->ws, ro_sn, - __i2400m_roq_nsn(roq, ro_sn), size); - d_dump(2, dev, payload, size); - switch(ro_type) { - case I2400M_RO_TYPE_RESET: - i2400m_roq_reset(i2400m, roq); - kfree_skb(skb); /* no data here */ - break; - case I2400M_RO_TYPE_PACKET: - i2400m_roq_queue(i2400m, roq, skb, ro_sn); - break; - case I2400M_RO_TYPE_WS: - i2400m_roq_update_ws(i2400m, roq, ro_sn); - kfree_skb(skb); /* no data here */ - break; - case I2400M_RO_TYPE_PACKET_WS: - i2400m_roq_queue_update_ws(i2400m, roq, skb, ro_sn); - break; - default: - dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type); - } - - spin_lock_irqsave(&i2400m->rx_lock, flags); - kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy); - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - } - else - i2400m_net_erx(i2400m, skb, cs); -error_skb_clone: -error: - d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p " - "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); -} - - -/* - * Act on a received payload - * - * @i2400m: device instance - * @skb_rx: skb where the transaction was received - * @single_last: 1 this is the only payload or the last one (so the - * skb can be reused instead of cloned). - * @pld: payload descriptor - * @payload: payload data - * - * Upon reception of a payload, look at its guts in the payload - * descriptor and decide what to do with it. If it is a single payload - * skb or if the last skb is a data packet, the skb will be referenced - * and modified (so it doesn't have to be cloned). 
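For reference, a standalone sketch of unpacking that 32-bit 'reorder' word (the shift/mask values are reproduced from the i2400m protocol header, linux/wimax/i2400m.h; treat them and the harness as illustrative):

#include <assert.h>
#include <stdint.h>

enum {	/* mirrors the I2400M_RO_* constants; verify against the header */
	RO_NEEDED = 0x01,
	RO_TYPE = 0x03,	RO_TYPE_SHIFT = 1,
	RO_CIN = 0x0f,	RO_CIN_SHIFT = 4,
	RO_SN = 0x7ff,	RO_SN_SHIFT = 21,
};

int main(void)
{
	/* type 1 (queue packet), queue index 3, sequence number 77 */
	uint32_t reorder = RO_NEEDED | (1u << RO_TYPE_SHIFT)
			 | (3u << RO_CIN_SHIFT) | (77u << RO_SN_SHIFT);

	assert(reorder & RO_NEEDED);
	assert(((reorder >> RO_TYPE_SHIFT) & RO_TYPE) == 1);
	assert(((reorder >> RO_CIN_SHIFT) & RO_CIN) == 3);	/* 1 of 16 queues */
	assert(((reorder >> RO_SN_SHIFT) & RO_SN) == 77);	/* mod-2048 sn */
	return 0;
}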
- */ -static -void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx, - unsigned single_last, const struct i2400m_pld *pld, - const void *payload) -{ - struct device *dev = i2400m_dev(i2400m); - size_t pl_size = i2400m_pld_size(pld); - enum i2400m_pt pl_type = i2400m_pld_type(pld); - - d_printf(7, dev, "RX: received payload type %u, %zu bytes\n", - pl_type, pl_size); - d_dump(8, dev, payload, pl_size); - - switch (pl_type) { - case I2400M_PT_DATA: - d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size); - i2400m_net_rx(i2400m, skb_rx, single_last, payload, pl_size); - break; - case I2400M_PT_CTRL: - i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size); - break; - case I2400M_PT_TRACE: - i2400m_rx_trace(i2400m, payload, pl_size); - break; - case I2400M_PT_EDATA: - d_printf(3, dev, "ERX: data payload %zu bytes\n", pl_size); - i2400m_rx_edata(i2400m, skb_rx, single_last, payload, pl_size); - break; - default: /* Anything else shouldn't come to the host */ - if (printk_ratelimit()) - dev_err(dev, "RX: HW BUG? unexpected payload type %u\n", - pl_type); - } -} - - -/* - * Check a received transaction's message header - * - * @i2400m: device descriptor - * @msg_hdr: message header - * @buf_size: size of the received buffer - * - * Check that the declarations done by a RX buffer message header are - * sane and consistent with the amount of data that was received. - */ -static -int i2400m_rx_msg_hdr_check(struct i2400m *i2400m, - const struct i2400m_msg_hdr *msg_hdr, - size_t buf_size) -{ - int result = -EIO; - struct device *dev = i2400m_dev(i2400m); - if (buf_size < sizeof(*msg_hdr)) { - dev_err(dev, "RX: HW BUG? message with short header (%zu " - "vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr)); - goto error; - } - if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) { - dev_err(dev, "RX: HW BUG? message received with unknown " - "barker 0x%08x (buf_size %zu bytes)\n", - le32_to_cpu(msg_hdr->barker), buf_size); - goto error; - } - if (msg_hdr->num_pls == 0) { - dev_err(dev, "RX: HW BUG? zero payload packets in message\n"); - goto error; - } - if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) { - dev_err(dev, "RX: HW BUG? message contains more payload " - "than maximum; ignoring.\n"); - goto error; - } - result = 0; -error: - return result; -} - - -/* - * Check a payload descriptor against the received data - * - * @i2400m: device descriptor - * @pld: payload descriptor - * @pl_itr: offset (in bytes) in the received buffer the payload is - * located - * @buf_size: size of the received buffer - * - * Given a payload descriptor (part of a RX buffer), check it is sane - * and that the data it declares fits in the buffer. - */ -static -int i2400m_rx_pl_descr_check(struct i2400m *i2400m, - const struct i2400m_pld *pld, - size_t pl_itr, size_t buf_size) -{ - int result = -EIO; - struct device *dev = i2400m_dev(i2400m); - size_t pl_size = i2400m_pld_size(pld); - enum i2400m_pt pl_type = i2400m_pld_type(pld); - - if (pl_size > i2400m->bus_pl_size_max) { - dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is " - "bigger than maximum %zu; ignoring message\n", - pl_itr, pl_size, i2400m->bus_pl_size_max); - goto error; - } - if (pl_itr + pl_size > buf_size) { /* enough? */ - dev_err(dev, "RX: HW BUG? payload @%zu: size %zu " - "goes beyond the received buffer " - "size (%zu bytes); ignoring message\n", - pl_itr, pl_size, buf_size); - goto error; - } - if (pl_type >= I2400M_PT_ILLEGAL) { - dev_err(dev, "RX: HW BUG? 
illegal payload type %u; "
-			"ignoring message\n", pl_type);
-		goto error;
-	}
-	result = 0;
-error:
-	return result;
-}
-
-
-/**
- * i2400m_rx - Receive a buffer of data from the device
- *
- * @i2400m: device descriptor
- * @skb: skbuff where the data has been received
- *
- * Parse a buffer of data that contains an RX message sent from the
- * device. See the file header for the format. Run all checks on the
- * buffer header, then run over each payload's descriptors, verify
- * their consistency and act on each payload's contents. If
- * everything is successful, update the device's statistics.
- *
- * Note: You need to set the skb to contain only the length of the
- * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
- *
- * Returns:
- *
- * 0 if ok, < 0 errno on error
- *
- * If ok, this function now owns the skb and the caller DOESN'T have
- * to run kfree_skb() on it. However, on error, the caller still owns
- * the skb and it is responsible for releasing it.
- */
-int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
-{
-	int i, result;
-	struct device *dev = i2400m_dev(i2400m);
-	const struct i2400m_msg_hdr *msg_hdr;
-	size_t pl_itr, pl_size;
-	unsigned long flags;
-	unsigned num_pls, single_last, skb_len;
-
-	skb_len = skb->len;
-	d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
-		  i2400m, skb, skb_len);
-	msg_hdr = (void *) skb->data;
-	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
-	if (result < 0)
-		goto error_msg_hdr_check;
-	result = -EIO;
-	num_pls = le16_to_cpu(msg_hdr->num_pls);
-	/* Check payload descriptor(s) */
-	pl_itr = struct_size(msg_hdr, pld, num_pls);
-	pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
-	if (pl_itr > skb_len) {	/* got all the payload descriptors? */
-		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
-			"%u payload descriptors (%zu each, total %zu)\n",
-			skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
-		goto error_pl_descr_short;
-	}
-	/* Walk each payload--check we really got it */
-	for (i = 0; i < num_pls; i++) {
-		/* work around old gcc warnings */
-		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
-		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
-						  pl_itr, skb_len);
-		if (result < 0)
-			goto error_pl_descr_check;
-		single_last = num_pls == 1 || i == num_pls - 1;
-		i2400m_rx_payload(i2400m, skb, single_last, &msg_hdr->pld[i],
-				  skb->data + pl_itr);
-		pl_itr += ALIGN(pl_size, I2400M_PL_ALIGN);
-		cond_resched();		/* Don't monopolize */
-	}
-	kfree_skb(skb);
-	/* Update device statistics */
-	spin_lock_irqsave(&i2400m->rx_lock, flags);
-	i2400m->rx_pl_num += i;
-	if (i > i2400m->rx_pl_max)
-		i2400m->rx_pl_max = i;
-	if (i < i2400m->rx_pl_min)
-		i2400m->rx_pl_min = i;
-	i2400m->rx_num++;
-	i2400m->rx_size_acc += skb_len;
-	if (skb_len < i2400m->rx_size_min)
-		i2400m->rx_size_min = skb_len;
-	if (skb_len > i2400m->rx_size_max)
-		i2400m->rx_size_max = skb_len;
-	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-error_pl_descr_check:
-error_pl_descr_short:
-error_msg_hdr_check:
-	d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
-		i2400m, skb, skb_len, result);
-	return result;
-}
-EXPORT_SYMBOL_GPL(i2400m_rx);
-
-
-void i2400m_unknown_barker(struct i2400m *i2400m,
-			   const void *buf, size_t size)
-{
-	struct device *dev = i2400m_dev(i2400m);
-	char prefix[64];
-	const __le32 *barker = buf;
-	dev_err(dev, "RX: HW BUG? unknown barker %08x, "
-		"dropping %zu bytes\n", le32_to_cpu(*barker), size);
-	snprintf(prefix, sizeof(prefix), "%s %s: ",
-		 dev_driver_string(dev), dev_name(dev));
-	if (size > 64) {
-		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
-			       8, 4, buf, 64, 0);
-		printk(KERN_ERR "%s... (only first 64 bytes "
-		       "dumped)\n", prefix);
-	} else
-		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
-			       8, 4, buf, size, 0);
-}
-EXPORT_SYMBOL(i2400m_unknown_barker);
-
-
-/*
- * Initialize the RX queue and infrastructure
- *
- * This sets up all the RX reordering infrastructures, which will not
- * be used if reordering is not enabled or if the firmware does not
- * support it. The device is told to do reordering in
- * i2400m_dev_initialize(), where it also looks at the value of the
- * i2400m->rx_reorder switch before taking a decision.
- *
- * Note we allocate the roq queues in one chunk and the actual logging
- * support in another one, and then we set up the pointers from the
- * first to the last.
- */
-int i2400m_rx_setup(struct i2400m *i2400m)
-{
-	int result = 0;
-
-	i2400m->rx_reorder = i2400m_rx_reorder_disabled? 0 : 1;
-	if (i2400m->rx_reorder) {
-		unsigned itr;
-		struct i2400m_roq_log *rd;
-
-		result = -ENOMEM;
-
-		i2400m->rx_roq = kcalloc(I2400M_RO_CIN + 1,
-					 sizeof(i2400m->rx_roq[0]), GFP_KERNEL);
-		if (i2400m->rx_roq == NULL)
-			goto error_roq_alloc;
-
-		rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
-			     GFP_KERNEL);
-		if (rd == NULL) {
-			result = -ENOMEM;
-			goto error_roq_log_alloc;
-		}
-
-		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) {
-			__i2400m_roq_init(&i2400m->rx_roq[itr]);
-			i2400m->rx_roq[itr].log = &rd[itr];
-		}
-		kref_init(&i2400m->rx_roq_refcount);
-	}
-	return 0;
-
-error_roq_log_alloc:
-	kfree(i2400m->rx_roq);
-error_roq_alloc:
-	return result;
-}
-
-
-/* Tear down the RX queue and infrastructure */
-void i2400m_rx_release(struct i2400m *i2400m)
-{
-	unsigned long flags;
-
-	if (i2400m->rx_reorder) {
-		spin_lock_irqsave(&i2400m->rx_lock, flags);
-		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
-		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
-	}
-	/* at this point, nothing can be received... */
-	i2400m_report_hook_flush(i2400m);
-}
diff --git a/drivers/net/wimax/i2400m/sysfs.c b/drivers/net/wimax/i2400m/sysfs.c deleted file mode 100644 index 895ee265909b..000000000000 --- a/drivers/net/wimax/i2400m/sysfs.c +++ /dev/null @@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Sysfs interfaces to show driver and device information
- *
- * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- */
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/device.h>
-#include "i2400m.h"
-
-
-#define D_SUBMODULE sysfs
-#include "debug-levels.h"
-
-
-/*
- * Set the idle timeout (msecs)
- *
- * FIXME: eventually this should be a common WiMAX stack method, but
- * we would like to wait to see how other devices manage it.
- */ -static -ssize_t i2400m_idle_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - ssize_t result; - struct i2400m *i2400m = net_dev_to_i2400m(to_net_dev(dev)); - unsigned val; - - result = -EINVAL; - if (sscanf(buf, "%u\n", &val) != 1) - goto error_no_unsigned; - if (val != 0 && (val < 100 || val > 300000 || val % 100 != 0)) { - dev_err(dev, "idle_timeout: %u: invalid msecs specification; " - "valid values are 0, 100-300000 in 100 increments\n", - val); - goto error_bad_value; - } - result = i2400m_set_idle_timeout(i2400m, val); - if (result >= 0) - result = size; -error_no_unsigned: -error_bad_value: - return result; -} - -static -DEVICE_ATTR_WO(i2400m_idle_timeout); - -static -struct attribute *i2400m_dev_attrs[] = { - &dev_attr_i2400m_idle_timeout.attr, - NULL, -}; - -struct attribute_group i2400m_dev_attr_group = { - .name = NULL, /* we want them in the same directory */ - .attrs = i2400m_dev_attrs, -}; diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c deleted file mode 100644 index 1255302e251e..000000000000 --- a/drivers/net/wimax/i2400m/tx.c +++ /dev/null @@ -1,1011 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * Generic (non-bus specific) TX handling - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - Initial implementation - * - * Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Rewritten to use a single FIFO to lower the memory allocation - * pressure and optimize cache hits when copying to the queue, as - * well as splitting out bus-specific code. - * - * - * Implements data transmission to the device; this is done through a - * software FIFO, as data/control frames can be coalesced (while the - * device is reading the previous tx transaction, others accumulate). 
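A tiny standalone illustration of the size bookkeeping this comment goes on to describe: every closed transaction is padded up to a multiple of the bus block size (the 256-byte block size below is an arbitrary stand-in; the real value comes from the bus-specific driver):

#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	size_t bus_tx_block_size = 256;		/* assumed, bus-specific */
	size_t msg_len = 16 + 4 * 12 + 1400;	/* hdr + 12 plds + payload */
	size_t padded = ALIGN_UP(msg_len, bus_tx_block_size);

	assert(padded == 1536);			/* 1464 rounded up */
	assert(padded % bus_tx_block_size == 0);
	assert(padded - msg_len < bus_tx_block_size);	/* minimal padding */
	return 0;
}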
- * - * A FIFO is used because at the end it is resource-cheaper than trying - * to implement scatter/gather over USB. As well, most traffic is going - * to be download (vs upload). - * - * The format for sending/receiving data to/from the i2400m is - * described in detail in rx.c:PROTOCOL FORMAT. In here we implement - * the transmission of that. This is split between a bus-independent - * part that just prepares everything and a bus-specific part that - * does the actual transmission over the bus to the device (in the - * bus-specific driver). - * - * - * The general format of a device-host transaction is MSG-HDR, PLD1, - * PLD2...PLDN, PL1, PL2,...PLN, PADDING. - * - * Because we need to send the payload descriptors and then the payloads, and - * because it is kind of expensive to do scatterlists in USB (one URB - * per node), it becomes cheaper to append all the data to a FIFO - * (copying to a FIFO potentially in cache is cheaper). - * - * Then the bus-specific code takes the parts of that FIFO that are - * written and passes them to the device. - * - * So the concepts to keep in mind there are: - * - * We use a FIFO to queue the data in a linear buffer. We first append - * a MSG-HDR, space for I2400M_TX_PLD_MAX payload descriptors and then - * go appending payloads until we run out of space or of payload - * descriptors. Then we append padding to make the whole transaction a - * multiple of i2400m->bus_tx_block_size (as defined by the bus layer). - * - * - A TX message: a combination of a message header, payload - * descriptors and payloads. - * - * Open: it is marked as active (i2400m->tx_msg is valid) and we - * can keep adding payloads to it. - * - * Closed: we are not appending more payloads to this TX message - * (exhausted space in the queue, too many payloads, or - * similar). We have appended padding so the whole message - * length is aligned to i2400m->bus_tx_block_size (as set by the - * bus/transport layer). - * - * - Most of the time we keep a TX message open to which we append - * payloads. - * - * - If we are going to append and there is no more space (we are at - * the end of the FIFO), we close the message, mark the rest of the - * FIFO space unusable (skip_tail), create a new message at the - * beginning of the FIFO (if there is space) and append the message - * there. - * - * This is because we need to give linear TX messages to the bus - * engine. So rather than wrapping a message around the end of the - * buffer, we mark the remaining tail space as skippable and continue - * at the head. - * - * - We overload one of the fields in the message header to use it as - * 'size' of the TX message, so we can iterate over them. It also - * contains a flag that indicates if we have to skip it or not. - * When we send the buffer, we update that to its real on-the-wire - * value. - * - * - The MSG-HDR PLD1...PLDN stuff has to be a size multiple of 16. - * - * It follows that if MSG-HDR says we have N payloads, the whole - * header + descriptors is 16 + 4*N; for those to be a multiple of - * 16, it follows that N can be 4, 8, 12, ... (32, 48, 64, 80... - * bytes). - * - * So if we have only 1 payload, we have to submit a header that in - * all truth has space for 4. - * - * The implication is that we reserve space for 12 (64 bytes); but - * if we fill up only (eg) 2, our header becomes 32 bytes only. So - * the TX engine has to shift those 32 bytes of msg header and 2 - * payloads and padding so that right after it the payloads start - * and the TX engine has to know about that (see the worked example - * below).
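The 16 + 4*N arithmetic above is easier to see with concrete numbers. A minimal standalone sketch, illustrative only: it hard-codes the 16-byte header and 4-byte payload-descriptor sizes quoted in the comment rather than using the real struct i2400m_msg_hdr and struct i2400m_pld definitions:

#include <stdio.h>

/* ALIGN16() mirrors the kernel's ALIGN(x, 16) */
#define ALIGN16(x) (((x) + 15u) & ~15u)

int main(void)
{
	for (unsigned n = 1; n <= 12; n++) {
		unsigned raw = 16 + 4 * n;	/* MSG-HDR + n descriptors */
		printf("%2u payloads: raw header %2u B -> closed header %2u B\n",
		       n, raw, ALIGN16(raw));
	}
	return 0;	/* 1-4 payloads -> 32 B, 5-8 -> 48 B, 9-12 -> 64 B */
}

So a message that reserved the full 64-byte header but was closed with only two payloads ships a 32-byte header; that 32-byte shift is exactly what the TX engine has to account for, as the comment goes on to explain.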
- * - * It is cheaper to move the header up than the whole payloads down. - * - * We do this in i2400m_tx_close(). See 'i2400m_msg_hdr->offset'. - * - * - Each payload has to be size-padded to 16 bytes; before appending - * it, we just do it. - * - * - The whole message has to be padded to i2400m->bus_tx_block_size; - * we do this at close time. Thus, when reserving space for the - * payload, we always make sure there is also free space for this - * padding that sooner or later will happen. - * - * When we append a message, we tell the bus specific code to kick in - * TXs. It will TX (in parallel) until the buffer is exhausted--hence - * the lockin we do. The TX code will only send a TX message at the - * time (which remember, might contain more than one payload). Of - * course, when the bus-specific driver attempts to TX a message that - * is still open, it gets closed first. - * - * Gee, this is messy; well a picture. In the example below we have a - * partially full FIFO, with a closed message ready to be delivered - * (with a moved message header to make sure it is size-aligned to - * 16), TAIL room that was unusable (and thus is marked with a message - * header that says 'skip this') and at the head of the buffer, an - * incomplete message with a couple of payloads. - * - * N ___________________________________________________ - * | | - * | TAIL room | - * | | - * | msg_hdr to skip (size |= 0x80000) | - * |---------------------------------------------------|------- - * | | /|\ - * | | | - * | TX message padding | | - * | | | - * | | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -| | - * | | | - * | payload 1 | | - * | | N * tx_block_size - * | | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -| | - * | | | - * | payload 1 | | - * | | | - * | | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- -|- - - - - * | padding 3 /|\ | | /|\ - * | padding 2 | | | | - * | pld 1 32 bytes (2 * 16) | | | - * | pld 0 | | | | - * | moved msg_hdr \|/ | \|/ | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -|- - - | - * | | _PLD_SIZE - * | unused | | - * | | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -| | - * | msg_hdr (size X) [this message is closed] | \|/ - * |===================================================|========== <=== OUT - * | | - * | | - * | | - * | Free rooom | - * | | - * | | - * | | - * | | - * | | - * | | - * | | - * | | - * | | - * |===================================================|========== <=== IN - * | | - * | | - * | | - * | | - * | payload 1 | - * | | - * | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -| - * | | - * | payload 0 | - * | | - * | | - * |- - - - - - - - - - - - - - - - - - - - - - - - - -| - * | pld 11 /|\ | - * | ... 
| | - * | pld 1 64 bytes (2 * 16) | - * | pld 0 | | - * | msg_hdr (size X) \|/ [message is open] | - * 0 --------------------------------------------------- - * - * - * ROADMAP - * - * i2400m_tx_setup() Called by i2400m_setup - * i2400m_tx_release() Called by i2400m_release() - * - * i2400m_tx() Called to send data or control frames - * i2400m_tx_fifo_push() Allocates append-space in the FIFO - * i2400m_tx_new() Opens a new message in the FIFO - * i2400m_tx_fits() Checks if a new payload fits in the message - * i2400m_tx_close() Closes an open message in the FIFO - * i2400m_tx_skip_tail() Marks unusable FIFO tail space - * i2400m->bus_tx_kick() - * - * Now i2400m->bus_tx_kick() is the bus-specific driver backend - * implementation; that would do: - * - * i2400m->bus_tx_kick() - * i2400m_tx_msg_get() Gets first message ready to go - * ...sends it... - * i2400m_tx_msg_sent() Ack the message is sent; repeat from - * _tx_msg_get() until it returns NULL - * (FIFO empty); a sketch of such a loop - * appears after i2400m_tx() below. - */ -#include <linux/netdevice.h> -#include <linux/slab.h> -#include <linux/export.h> -#include "i2400m.h" - - -#define D_SUBMODULE tx -#include "debug-levels.h" - -enum { - /** - * TX Buffer size - * - * Doc says maximum transaction is 16KiB. If we had 16KiB en - * route and 16KiB being queued, it boils down to needing - * 32KiB. - * 32KiB is insufficient for 1400 MTU, hence increasing - * tx buffer size to 64KiB. - */ - I2400M_TX_BUF_SIZE = 65536, - /** - * Message header and payload descriptors have to be 16 - * aligned (16 + 4 * N = 16 * M). If we assume that the average - * sent packet is MTU size (~1400-~1500), it follows that we could - * fit at most 10-11 payloads in one transaction. To meet the - * alignment requirement, that means we need to leave space - * for 12 (64 bytes). To simplify, we leave space for that. If - * at the end there are fewer, we pad up to the nearest - * multiple of 16. - */ - /* - * According to Intel Wimax i3200, i5x50 and i6x50 specification - * documents, the maximum number of payloads per message can be - * up to 60. Increasing the number of payloads to 60 per message - * helps to accommodate smaller payloads in a single transaction. - */ - I2400M_TX_PLD_MAX = 60, - I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr) - + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld), - I2400M_TX_SKIP = 0x80000000, - /* - * According to Intel Wimax i3200, i5x50 and i6x50 specification - * documents, the maximum size of each message can be up to 16KiB. - */ - I2400M_TX_MSG_SIZE = 16384, -}; - -#define TAIL_FULL ((void *)~(unsigned long)NULL) - -/* - * Calculate how much tail room is available - * - * Note the trick here. This path is ONLY called for Case A (see - * i2400m_tx_fifo_push() below), where we have: - * - * Case A - * N ___________ - * | tail room | - * | | - * |<- IN ->| - * | | - * | data | - * | | - * |<- OUT ->| - * | | - * | head room | - * 0 ----------- - * - * When calculating the tail_room, tx_in might get to be zero if - * i2400m->tx_in is right at the end of the buffer (really full - * buffer with no head room). In this case, tail_room would be - * I2400M_TX_BUF_SIZE, although it is actually zero. Hence the final - * mod (%) operation. However, when doing this kind of optimization, - * i2400m->tx_in being zero would fail, so we treat it as a special - * case.
- */ -static inline -size_t __i2400m_tx_tail_room(struct i2400m *i2400m) -{ - size_t tail_room; - size_t tx_in; - - if (unlikely(i2400m->tx_in == 0)) - return I2400M_TX_BUF_SIZE; - tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE; - tail_room = I2400M_TX_BUF_SIZE - tx_in; - tail_room %= I2400M_TX_BUF_SIZE; - return tail_room; -} - - -/* - * Allocate @size bytes in the TX fifo, return a pointer to it - * - * @i2400m: device descriptor - * @size: size of the buffer we need to allocate - * @padding: ensure that there is at least this many bytes of free - * contiguous space in the fifo. This is needed because later on - * we might need to add padding. - * @try_head: specify whether to allocate head room or tail room space - * in the TX FIFO. This boolean is required to avoid a system hang - * due to an infinite loop caused by i2400m_tx_fifo_push(). - * The caller must always try to allocate tail room space first by - * calling this routine with try_head = 0. If there - * is not enough tail room space but there is enough head room space - * (i2400m_tx_fifo_push() returns TAIL_FULL), try to allocate head - * room space by calling this routine again with try_head = 1. - * - * Returns: - * - * Pointer to the allocated space. NULL if there is no - * space. TAIL_FULL if there is no space at the tail but there is at - * the head (Case B below). - * - * These are the two basic cases we need to keep an eye on -- it is - * much better explained in linux/kernel/kfifo.c, but this code - * basically does the same. No rocket science here. - * - * Case A Case B - * N ___________ ___________ - * | tail room | | data | - * | | | | - * |<- IN ->| |<- OUT ->| - * | | | | - * | data | | room | - * | | | | - * |<- OUT ->| |<- IN ->| - * | | | | - * | head room | | data | - * 0 ----------- ----------- - * - * We allocate only *contiguous* space. - * - * We can allocate only from 'room'. In Case B, it is simple; in case - * A, we only try from the tail room; if it is not enough, we just - * fail and return TAIL_FULL and let the caller figure out if it wants to - * skip the tail room and try to allocate from the head. - * - * There is a corner case, wherein i2400m_tx_new() can get into - * an infinite loop calling i2400m_tx_fifo_push(). - * In certain situations, tx_in would have reached the top of the TX FIFO - * and i2400m_tx_tail_room() returns 0, as described below: - * - * N ___________ tail room is zero - * |<- IN ->| - * | | - * | | - * | | - * | data | - * |<- OUT ->| - * | | - * | | - * | head room | - * 0 ----------- - * When the tail room is zero in the TX FIFO and there is a request - * to add a payload to the TX FIFO, the following call chain results: - * i2400m_tx() - * ->calls i2400m_tx_close() - * ->calls i2400m_tx_skip_tail() - * goto try_new; - * ->calls i2400m_tx_new() - * |----> [try_head:] - * infinite loop | ->calls i2400m_tx_fifo_push() - * | if (tail_room < needed) - * | if (head_room >= needed) - * | return TAIL_FULL; - * |<---- goto try_head; - * - * i2400m_tx() calls i2400m_tx_close() to close the message, since there - * is no tail room to accommodate the payload, and calls - * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls - * i2400m_tx_new() to allocate space for a new message header, calling - * i2400m_tx_fifo_push(), which returns TAIL_FULL, since there is no tail space - * to accommodate the message header, but there is enough head space. - * i2400m_tx_new() then keeps retrying by calling i2400m_tx_fifo_push(), - * ending up in a loop causing a system freeze.
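Condensed to its control flow, the avoidance protocol (summarized next) is, schematically, the following; this is a sketch of the calling pattern, not the exact driver code:

	bool try_head = false;
retry:
	ptr = i2400m_tx_fifo_push(i2400m, size, padding, try_head);
	if (ptr == TAIL_FULL) {
		i2400m_tx_skip_tail(i2400m);	/* give up on the tail space */
		try_head = true;		/* ...and try the head once */
		goto retry;
	}
	if (ptr == NULL)
		return -ENOSPC;			/* neither tail nor head fits */

Once try_head is set, i2400m_tx_fifo_push() no longer reports TAIL_FULL, so the retry can happen at most once.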
- * - * This corner case is avoided by using a try_head boolean, - * as an argument to i2400m_tx_fifo_push(). - * - * Note: - * - * Assumes i2400m->tx_lock is taken, and we use that as a barrier - * - * The indexes keep increasing and we reset them to zero when we - * pop data off the queue - */ -static -void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, - size_t padding, bool try_head) -{ - struct device *dev = i2400m_dev(i2400m); - size_t room, tail_room, needed_size; - void *ptr; - - needed_size = size + padding; - room = I2400M_TX_BUF_SIZE - (i2400m->tx_in - i2400m->tx_out); - if (room < needed_size) { /* this takes care of Case B */ - d_printf(2, dev, "fifo push %zu/%zu: no space\n", - size, padding); - return NULL; - } - /* Is there space at the tail? */ - tail_room = __i2400m_tx_tail_room(i2400m); - if (!try_head && tail_room < needed_size) { - /* - * If the tail room space is not enough to push the message - * in the TX FIFO, then there are two possibilities: - * 1. There is enough head room space to accommodate - * this message in the TX FIFO. - * 2. There is not enough space in the head room and - * in tail room of the TX FIFO to accommodate the message. - * In the case (1), return TAIL_FULL so that the caller - * can figure out, if the caller wants to push the message - * into the head room space. - * In the case (2), return NULL, indicating that the TX FIFO - * cannot accommodate the message. - */ - if (room - tail_room >= needed_size) { - d_printf(2, dev, "fifo push %zu/%zu: tail full\n", - size, padding); - return TAIL_FULL; /* There might be head space */ - } else { - d_printf(2, dev, "fifo push %zu/%zu: no head space\n", - size, padding); - return NULL; /* There is no space */ - } - } - ptr = i2400m->tx_buf + i2400m->tx_in % I2400M_TX_BUF_SIZE; - d_printf(2, dev, "fifo push %zu/%zu: at @%zu\n", size, padding, - i2400m->tx_in % I2400M_TX_BUF_SIZE); - i2400m->tx_in += size; - return ptr; -} - - -/* - * Mark the tail of the FIFO buffer as 'to-skip' - * - * We should never hit the BUG_ON() because all the sizes we push to - * the FIFO are padded to be a multiple of 16 -- the size of *msg - * (I2400M_PL_PAD for the payloads, I2400M_TX_PLD_SIZE for the - * header). - * - * Tail room can get to be zero if a message was opened when there was - * space only for a header. _tx_close() will mark it as to-skip (as it - * will have no payloads) and there will be no more space to flush, so - * nothing has to be done here. This is probably cheaper than ensuring - * in _tx_new() that there is some space for payloads...as we could - * always possibly hit the same problem if the payload wouldn't fit. - * - * Note: - * - * Assumes i2400m->tx_lock is taken, and we use that as a barrier - * - * This path is only taken for Case A FIFO situations [see - * i2400m_tx_fifo_push()] - */ -static -void i2400m_tx_skip_tail(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - size_t tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE; - size_t tail_room = __i2400m_tx_tail_room(i2400m); - struct i2400m_msg_hdr *msg = i2400m->tx_buf + tx_in; - if (unlikely(tail_room == 0)) - return; - BUG_ON(tail_room < sizeof(*msg)); - msg->size = tail_room | I2400M_TX_SKIP; - d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n", - tail_room, tx_in); - i2400m->tx_in += tail_room; -} - - -/* - * Check if a skb will fit in the TX queue's current active TX - * message (if there are still descriptors left unused). - * - * Returns: - * 0 if the message won't fit, 1 if it will. 
- * - * Note: - * - * Assumes a TX message is active (i2400m->tx_msg). - * - * Assumes i2400m->tx_lock is taken, and we use that as a barrier - */ -static -unsigned i2400m_tx_fits(struct i2400m *i2400m) -{ - struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg; - return le16_to_cpu(msg_hdr->num_pls) < I2400M_TX_PLD_MAX; - -} - - -/* - * Start a new TX message header in the queue. - * - * Reserve memory from the base FIFO engine and then just initialize - * the message header. - * - * We allocate the biggest TX message header we might need (one that'd - * fit I2400M_TX_PLD_MAX payloads) -- when it is closed it will be - * 'ironed it out' and the unneeded parts removed. - * - * NOTE: - * - * Assumes that the previous message is CLOSED (eg: either - * there was none or 'i2400m_tx_close()' was called on it). - * - * Assumes i2400m->tx_lock is taken, and we use that as a barrier - */ -static -void i2400m_tx_new(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - struct i2400m_msg_hdr *tx_msg; - bool try_head = false; - BUG_ON(i2400m->tx_msg != NULL); - /* - * In certain situations, TX queue might have enough space to - * accommodate the new message header I2400M_TX_PLD_SIZE, but - * might not have enough space to accommodate the payloads. - * Adding bus_tx_room_min padding while allocating a new TX message - * increases the possibilities of including at least one payload of the - * size <= bus_tx_room_min. - */ -try_head: - tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, - i2400m->bus_tx_room_min, try_head); - if (tx_msg == NULL) - goto out; - else if (tx_msg == TAIL_FULL) { - i2400m_tx_skip_tail(i2400m); - d_printf(2, dev, "new TX message: tail full, trying head\n"); - try_head = true; - goto try_head; - } - memset(tx_msg, 0, I2400M_TX_PLD_SIZE); - tx_msg->size = I2400M_TX_PLD_SIZE; -out: - i2400m->tx_msg = tx_msg; - d_printf(2, dev, "new TX message: %p @%zu\n", - tx_msg, (void *) tx_msg - i2400m->tx_buf); -} - - -/* - * Finalize the current TX message header - * - * Sets the message header to be at the proper location depending on - * how many descriptors we have (check documentation at the file's - * header for more info on that). - * - * Appends padding bytes to make sure the whole TX message (counting - * from the 'relocated' message header) is aligned to - * tx_block_size. We assume the _append() code has left enough space - * in the FIFO for that. If there are no payloads, just pass, as it - * won't be transferred. - * - * The amount of padding bytes depends on how many payloads are in the - * TX message, as the "msg header and payload descriptors" will be - * shifted up in the buffer. - */ -static -void i2400m_tx_close(struct i2400m *i2400m) -{ - struct device *dev = i2400m_dev(i2400m); - struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg; - struct i2400m_msg_hdr *tx_msg_moved; - size_t aligned_size, padding, hdr_size; - void *pad_buf; - unsigned num_pls; - - if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */ - goto out; - num_pls = le16_to_cpu(tx_msg->num_pls); - /* We can get this situation when a new message was started - * and there was no space to add payloads before hitting the - tail (and taking padding into consideration). */ - if (num_pls == 0) { - tx_msg->size |= I2400M_TX_SKIP; - goto out; - } - /* Relocate the message header - * - * Find the current header size, align it to 16 and if we need - * to move it so the tail is next to the payloads, move it and - * set the offset. 
- * - * If it moved, this header is good only for transmission; the - * original one (it is kept if we moved) is still used to - * figure out where the next TX message starts (and where the - * offset to the moved header is). - */ - hdr_size = struct_size(tx_msg, pld, le16_to_cpu(tx_msg->num_pls)); - hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN); - tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size; - tx_msg_moved = (void *) tx_msg + tx_msg->offset; - memmove(tx_msg_moved, tx_msg, hdr_size); - tx_msg_moved->size -= tx_msg->offset; - /* - * Now figure out how much we have to add to the (moved!) - * message so the size is a multiple of i2400m->bus_tx_block_size. - */ - aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size); - padding = aligned_size - tx_msg_moved->size; - if (padding > 0) { - pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0); - if (WARN_ON(pad_buf == NULL || pad_buf == TAIL_FULL)) { - /* This should not happen -- append should verify - * there is always space left at least to append - * tx_block_size */ - dev_err(dev, - "SW BUG! Possible data leakage from memory the " - "device should not read for padding - " - "size %lu aligned_size %zu tx_buf %p in " - "%zu out %zu\n", - (unsigned long) tx_msg_moved->size, - aligned_size, i2400m->tx_buf, i2400m->tx_in, - i2400m->tx_out); - } else - memset(pad_buf, 0xad, padding); - } - tx_msg_moved->padding = cpu_to_le16(padding); - tx_msg_moved->size += padding; - if (tx_msg != tx_msg_moved) - tx_msg->size += padding; -out: - i2400m->tx_msg = NULL; -} - - -/** - * i2400m_tx - send the data in a buffer to the device - * - * @buf: pointer to the buffer to transmit - * - * @buf_len: buffer size - * - * @pl_type: type of the payload we are sending. - * - * Returns: - * 0 if ok, < 0 errno code on error (-ENOSPC, if there is no more - * room for the message in the queue). - * - * Appends the buffer to the TX FIFO and notifies the bus-specific - * part of the driver that there is new data ready to transmit. - * Once this function returns, the buffer has been copied, so it can - * be reused. - * - * The steps followed to append are explained in detail in the file - * header. - * - * Whenever we write to a message, we increase msg->size, so it - * reflects exactly how big the message is. This is needed so that if - * we concatenate two messages before they can be sent, the code that - * sends the messages can find the boundaries (and it will replace the - * size with the real barker before sending). - * - * Note: - * - * Cold and warm reset payloads need to be sent as a single - * payload, so we handle that. 
- */ -int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len, - enum i2400m_pt pl_type) -{ - int result = -ENOSPC; - struct device *dev = i2400m_dev(i2400m); - unsigned long flags; - size_t padded_len; - void *ptr; - bool try_head = false; - unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM - || pl_type == I2400M_PT_RESET_COLD; - - d_fnstart(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u)\n", - i2400m, buf, buf_len, pl_type); - padded_len = ALIGN(buf_len, I2400M_PL_ALIGN); - d_printf(5, dev, "padded_len %zd buf_len %zd\n", padded_len, buf_len); - /* If there is no current TX message, create one; if the - * current one is out of payload slots or we have a singleton, - * close it and start a new one */ - spin_lock_irqsave(&i2400m->tx_lock, flags); - /* If tx_buf is NULL, device is shutdown */ - if (i2400m->tx_buf == NULL) { - result = -ESHUTDOWN; - goto error_tx_new; - } -try_new: - if (unlikely(i2400m->tx_msg == NULL)) - i2400m_tx_new(i2400m); - else if (unlikely(!i2400m_tx_fits(i2400m) - || (is_singleton && i2400m->tx_msg->num_pls != 0))) { - d_printf(2, dev, "closing TX message (fits %u singleton " - "%u num_pls %u)\n", i2400m_tx_fits(i2400m), - is_singleton, i2400m->tx_msg->num_pls); - i2400m_tx_close(i2400m); - i2400m_tx_new(i2400m); - } - if (i2400m->tx_msg == NULL) - goto error_tx_new; - /* - * Check if this skb will fit in the TX queue's current active - * TX message. The total message size must not exceed the maximum - * size of each message I2400M_TX_MSG_SIZE. If it exceeds, - * close the current message and push this skb into the new message. - */ - if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) { - d_printf(2, dev, "TX: message too big, going new\n"); - i2400m_tx_close(i2400m); - i2400m_tx_new(i2400m); - } - if (i2400m->tx_msg == NULL) - goto error_tx_new; - /* So we have a current message header; now append space for - * the message -- if there is not enough, try the head */ - ptr = i2400m_tx_fifo_push(i2400m, padded_len, - i2400m->bus_tx_block_size, try_head); - if (ptr == TAIL_FULL) { /* Tail is full, try head */ - d_printf(2, dev, "pl append: tail full\n"); - i2400m_tx_close(i2400m); - i2400m_tx_skip_tail(i2400m); - try_head = true; - goto try_new; - } else if (ptr == NULL) { /* All full */ - result = -ENOSPC; - d_printf(2, dev, "pl append: all full\n"); - } else { /* Got space, copy it, set padding */ - struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg; - unsigned num_pls = le16_to_cpu(tx_msg->num_pls); - memcpy(ptr, buf, buf_len); - memset(ptr + buf_len, 0xad, padded_len - buf_len); - i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type); - d_printf(3, dev, "pld 0x%08x (type 0x%1x len 0x%04zx\n", - le32_to_cpu(tx_msg->pld[num_pls].val), - pl_type, buf_len); - tx_msg->num_pls = le16_to_cpu(num_pls+1); - tx_msg->size += padded_len; - d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n", - padded_len, tx_msg->size, num_pls+1); - d_printf(2, dev, - "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n", - (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size, - num_pls+1, ptr - i2400m->tx_buf, buf_len, padded_len); - result = 0; - if (is_singleton) - i2400m_tx_close(i2400m); - } -error_tx_new: - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - /* kick in most cases, except when the TX subsys is down, as - * it might free space */ - if (likely(result != -ESHUTDOWN)) - i2400m->bus_tx_kick(i2400m); - d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n", - i2400m, buf, buf_len, pl_type, result); - return result; -} 
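Putting the two halves of the contract together: a caller queues payloads with i2400m_tx() while a bus backend drains closed messages from its kick hook using i2400m_tx_msg_get()/i2400m_tx_msg_sent() (defined just below). A hypothetical sketch -- example_send_ctl(), my_bus_tx_kick() and my_bus_send() are made-up names, and I2400M_PT_CTRL is assumed to be the control payload type from enum i2400m_pt in i2400m.h:

/* Caller side: the buffer is copied into the TX FIFO, so it can be
 * reused (or live on the stack) as soon as i2400m_tx() returns. */
static int example_send_ctl(struct i2400m *i2400m, const void *frame,
			    size_t len)
{
	return i2400m_tx(i2400m, frame, len, I2400M_PT_CTRL);
}

/* Bus side: fetch one closed message at a time, transfer it, then
 * acknowledge it so the FIFO space is reclaimed; repeat until empty. */
static void my_bus_tx_kick(struct i2400m *i2400m)
{
	struct i2400m_msg_hdr *tx_msg;
	size_t tx_msg_size;

	while ((tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)) != NULL) {
		my_bus_send(i2400m, tx_msg, tx_msg_size);	/* bus transfer */
		i2400m_tx_msg_sent(i2400m);			/* pop from FIFO */
	}
}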
-EXPORT_SYMBOL_GPL(i2400m_tx); - - -/** - * i2400m_tx_msg_get - Get the first TX message in the FIFO to start sending it - * - * @i2400m: device descriptor - * @bus_size: where to place the size of the TX message - * - * Called by the bus-specific driver to get the first TX message in - * the FIFO that is ready for transmission. - * - * It sets the state in @i2400m to indicate the bus-specific driver is - * transferring that message (i2400m->tx_msg_size). - * - * Once the transfer is completed, call i2400m_tx_msg_sent(). - * - * Notes: - * - * The size of the TX message to be transmitted might be smaller than - * that of the TX message in the FIFO (in case the header was - * shorter). Hence, we copy it in @bus_size, for the bus layer to - * use. We keep the message's size in i2400m->tx_msg_size so that - * when the bus layer is done transferring we know how much to - * advance the fifo. - * - * We collect statistics here as all the data is available and we - * assume it is going to work [see i2400m_tx_msg_sent()]. - */ -struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m, - size_t *bus_size) -{ - struct device *dev = i2400m_dev(i2400m); - struct i2400m_msg_hdr *tx_msg, *tx_msg_moved; - unsigned long flags, pls; - - d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size); - spin_lock_irqsave(&i2400m->tx_lock, flags); - tx_msg_moved = NULL; - if (i2400m->tx_buf == NULL) - goto out_unlock; -skip: - tx_msg_moved = NULL; - if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */ - i2400m->tx_in = 0; - i2400m->tx_out = 0; - d_printf(2, dev, "TX: FIFO empty: resetting\n"); - goto out_unlock; - } - tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE; - if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */ - d_printf(2, dev, "TX: skip: msg @%zu (%zu b)\n", - i2400m->tx_out % I2400M_TX_BUF_SIZE, - (size_t) tx_msg->size & ~I2400M_TX_SKIP); - i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP; - goto skip; - } - - if (tx_msg->num_pls == 0) { /* No payloads? */ - if (tx_msg == i2400m->tx_msg) { /* open, we are done */ - d_printf(2, dev, - "TX: FIFO empty: open msg w/o payloads @%zu\n", - (void *) tx_msg - i2400m->tx_buf); - tx_msg = NULL; - goto out_unlock; - } else { /* closed, skip it */ - d_printf(2, dev, - "TX: skip msg w/o payloads @%zu (%zu b)\n", - (void *) tx_msg - i2400m->tx_buf, - (size_t) tx_msg->size); - i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP; - goto skip; - } - } - if (tx_msg == i2400m->tx_msg) /* open msg?
*/ - i2400m_tx_close(i2400m); - - /* Now we have a valid TX message (with payloads) to TX */ - tx_msg_moved = (void *) tx_msg + tx_msg->offset; - i2400m->tx_msg_size = tx_msg->size; - *bus_size = tx_msg_moved->size; - d_printf(2, dev, "TX: pid %d msg hdr at @%zu offset +@%zu " - "size %zu bus_size %zu\n", - current->pid, (void *) tx_msg - i2400m->tx_buf, - (size_t) tx_msg->offset, (size_t) tx_msg->size, - (size_t) tx_msg_moved->size); - tx_msg_moved->barker = le32_to_cpu(I2400M_H2D_PREVIEW_BARKER); - tx_msg_moved->sequence = le32_to_cpu(i2400m->tx_sequence++); - - pls = le32_to_cpu(tx_msg_moved->num_pls); - i2400m->tx_pl_num += pls; /* Update stats */ - if (pls > i2400m->tx_pl_max) - i2400m->tx_pl_max = pls; - if (pls < i2400m->tx_pl_min) - i2400m->tx_pl_min = pls; - i2400m->tx_num++; - i2400m->tx_size_acc += *bus_size; - if (*bus_size < i2400m->tx_size_min) - i2400m->tx_size_min = *bus_size; - if (*bus_size > i2400m->tx_size_max) - i2400m->tx_size_max = *bus_size; -out_unlock: - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - d_fnstart(3, dev, "(i2400m %p bus_size %p [%zu]) = %p\n", - i2400m, bus_size, *bus_size, tx_msg_moved); - return tx_msg_moved; -} -EXPORT_SYMBOL_GPL(i2400m_tx_msg_get); - - -/** - * i2400m_tx_msg_sent - indicate the transmission of a TX message - * - * @i2400m: device descriptor - * - * Called by the bus-specific driver when a message has been sent; - * this pops it from the FIFO; and as there is space, start the queue - * in case it was stopped. - * - * Should be called even if the message send failed and we are - * dropping this TX message. - */ -void i2400m_tx_msg_sent(struct i2400m *i2400m) -{ - unsigned n; - unsigned long flags; - struct device *dev = i2400m_dev(i2400m); - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - spin_lock_irqsave(&i2400m->tx_lock, flags); - if (i2400m->tx_buf == NULL) - goto out_unlock; - i2400m->tx_out += i2400m->tx_msg_size; - d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size); - i2400m->tx_msg_size = 0; - BUG_ON(i2400m->tx_out > i2400m->tx_in); - /* level the FIFO markers off */ - n = i2400m->tx_out / I2400M_TX_BUF_SIZE; - i2400m->tx_out %= I2400M_TX_BUF_SIZE; - i2400m->tx_in -= n * I2400M_TX_BUF_SIZE; -out_unlock: - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} -EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent); - - -/** - * i2400m_tx_setup - Initialize the TX queue and infrastructure - * - * Make sure we reset the TX sequence to zero, as when this function - * is called, the firmware has just been restarted. Same rationale - * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since - * the memory for the TX queue is reallocated. - */ -int i2400m_tx_setup(struct i2400m *i2400m) -{ - int result = 0; - void *tx_buf; - unsigned long flags; - - /* Do this here only once -- can't do on - * i2400m_hard_start_xmit() as we'll cause race conditions if - * the WS was scheduled on another CPU */ - INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work); - - tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC); - if (tx_buf == NULL) { - result = -ENOMEM; - goto error_kmalloc; - } - - /* - * Fail the build if we can't fit at least two maximum size messages - * on the TX FIFO [one being delivered while one is constructed].
- */ - BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE); - spin_lock_irqsave(&i2400m->tx_lock, flags); - i2400m->tx_sequence = 0; - i2400m->tx_in = 0; - i2400m->tx_out = 0; - i2400m->tx_msg_size = 0; - i2400m->tx_msg = NULL; - i2400m->tx_buf = tx_buf; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - /* Huh? the bus layer has to define this... */ - BUG_ON(i2400m->bus_tx_block_size == 0); -error_kmalloc: - return result; - -} - - -/** - * i2400m_tx_release - Tear down the TX queue and infrastructure - */ -void i2400m_tx_release(struct i2400m *i2400m) -{ - unsigned long flags; - spin_lock_irqsave(&i2400m->tx_lock, flags); - kfree(i2400m->tx_buf); - i2400m->tx_buf = NULL; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); -} diff --git a/drivers/net/wimax/i2400m/usb-debug-levels.h b/drivers/net/wimax/i2400m/usb-debug-levels.h deleted file mode 100644 index b6f7335de765..000000000000 --- a/drivers/net/wimax/i2400m/usb-debug-levels.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Intel Wireless WiMAX Connection 2400m - * Debug levels control file for the i2400m-usb module - * - * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - */ -#ifndef __debug_levels__h__ -#define __debug_levels__h__ - -/* Maximum compile and run time debug level for all submodules */ -#define D_MODULENAME i2400m_usb -#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL - -#include <linux/wimax/debug.h> - -/* List of all the enabled modules */ -enum d_module { - D_SUBMODULE_DECLARE(usb), - D_SUBMODULE_DECLARE(fw), - D_SUBMODULE_DECLARE(notif), - D_SUBMODULE_DECLARE(rx), - D_SUBMODULE_DECLARE(tx), -}; - - -#endif /* #ifndef __debug_levels__h__ */ diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c deleted file mode 100644 index 27ab233650d5..000000000000 --- a/drivers/net/wimax/i2400m/usb-fw.c +++ /dev/null @@ -1,365 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * Firmware uploader's USB specifics - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Initial implementation - * - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - bus generic/specific split - * - * THE PROCEDURE - * - * See fw.c for the generic description of this procedure. - * - * This file implements only the USB specifics. It boils down to how - * to send a command and wait for an acknowledgement from the - * device. - * - * This code (and process) is single threaded. It assumes it is the - * only thread poking around (guaranteed by fw.c). - * - * COMMAND EXECUTION - * - * A write URB is posted with the buffer to the bulk output endpoint. - * - * ACK RECEPTION - * - * We just post a URB to the notification endpoint and wait for - * data. We repeat until we get all the data we expect (as indicated - * by the call from the bus generic code). - * - * The data is not read from the bulk in endpoint for boot mode. - * - * ROADMAP - * - * i2400mu_bus_bm_cmd_send - * i2400m_bm_cmd_prepare... - * i2400mu_tx_bulk_out - * - * i2400mu_bus_bm_wait_for_ack - * i2400m_notif_submit - */ -#include <linux/usb.h> -#include <linux/gfp.h> -#include "i2400m-usb.h" - - -#define D_SUBMODULE fw -#include "usb-debug-levels.h" - - -/* - * Synchronous write to the device - * - * Takes care of updating EDC counts and thus handles device errors. - */ -static -ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size) -{ - int result; - struct device *dev = &i2400mu->usb_iface->dev; - int len; - struct usb_endpoint_descriptor *epd; - int pipe, do_autopm = 1; - - result = usb_autopm_get_interface(i2400mu->usb_iface); - if (result < 0) { - dev_err(dev, "BM-CMD: can't get autopm: %d\n", result); - do_autopm = 0; - } - epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out); - pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); -retry: - result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200); - switch (result) { - case 0: - if (len != buf_size) { - dev_err(dev, "BM-CMD: short write (%u B vs %zu " - "expected)\n", len, buf_size); - result = -EIO; - break; - } - result = len; - break; - case -EPIPE: - /* - * Stall -- maybe the device is choking with our - * requests. Clear it and give it some time. If they - * happen too often, it might be another symptom, so we - * reset. - * - * No error handling for usb_clear_halt(); if it - * works, the retry works; if it fails, this switch - * does the error handling for us. - */ - if (edc_inc(&i2400mu->urb_edc, - 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "BM-CMD: too many stalls in " - "URB; resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - } else { - usb_clear_halt(i2400mu->usb_dev, pipe); - msleep(10); /* give the device some time */ - goto retry; - } - fallthrough; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ...
*/ - case -ENOENT: /* just ignore it */ - case -ESHUTDOWN: /* and exit */ - case -ECONNRESET: - result = -ESHUTDOWN; - break; - case -ETIMEDOUT: /* bah... */ - break; - default: /* any other? */ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "BM-CMD: maximum errors in " - "URB exceeded; resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - result = -ENODEV; - break; - } - dev_err(dev, "BM-CMD: URB error %d, retrying\n", - result); - goto retry; - } - if (do_autopm) - usb_autopm_put_interface(i2400mu->usb_iface); - return result; -} - - -/* - * Send a boot-mode command over the bulk-out pipe - * - * Command can be a raw command, which requires no preparation (and - * which might not even be following the command format). Checks that - * the right amount of data was transferred. - * - * To satisfy USB requirements (no onstack, vmalloc or in data segment - * buffers), we copy the command to i2400m->bm_cmd_buf and send it from - * there. - * - * @flags: pass thru from i2400m_bm_cmd() - * @return: cmd_size if ok, < 0 errno code on error. - */ -ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m, - const struct i2400m_bootrom_header *_cmd, - size_t cmd_size, int flags) -{ - ssize_t result; - struct device *dev = i2400m_dev(i2400m); - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd); - struct i2400m_bootrom_header *cmd; - size_t cmd_size_a = ALIGN(cmd_size, 16); /* USB restriction */ - - d_fnstart(8, dev, "(i2400m %p cmd %p size %zu)\n", - i2400m, _cmd, cmd_size); - result = -E2BIG; - if (cmd_size > I2400M_BM_CMD_BUF_SIZE) - goto error_too_big; - if (_cmd != i2400m->bm_cmd_buf) - memmove(i2400m->bm_cmd_buf, _cmd, cmd_size); - cmd = i2400m->bm_cmd_buf; - if (cmd_size_a > cmd_size) /* Zero pad space */ - memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size); - if ((flags & I2400M_BM_CMD_RAW) == 0) { - if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0)) - dev_warn(dev, "SW BUG: response_required == 0\n"); - i2400m_bm_cmd_prepare(cmd); - } - result = i2400mu_tx_bulk_out(i2400mu, i2400m->bm_cmd_buf, cmd_size); - if (result < 0) { - dev_err(dev, "boot-mode cmd %d: cannot send: %zd\n", - opcode, result); - goto error_cmd_send; - } - if (result != cmd_size) { /* all was transferred? 
*/ - dev_err(dev, "boot-mode cmd %d: incomplete transfer " - "(%zd vs %zu submitted)\n", opcode, result, cmd_size); - result = -EIO; - goto error_cmd_size; - } -error_cmd_size: -error_cmd_send: -error_too_big: - d_fnend(8, dev, "(i2400m %p cmd %p size %zu) = %zd\n", - i2400m, _cmd, cmd_size, result); - return result; -} - - -static -void __i2400mu_bm_notif_cb(struct urb *urb) -{ - complete(urb->context); -} - - -/* - * submit a read to the notification endpoint - * - * @i2400m: device descriptor - * @urb: urb to use - * @completion: completion variable to complete when done - * - * Data is always read to i2400m->bm_ack_buf - */ -static -int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb, - struct completion *completion) -{ - struct i2400m *i2400m = &i2400mu->i2400m; - struct usb_endpoint_descriptor *epd; - int pipe; - - epd = usb_get_epd(i2400mu->usb_iface, - i2400mu->endpoint_cfg.notification); - pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); - usb_fill_int_urb(urb, i2400mu->usb_dev, pipe, - i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE, - __i2400mu_bm_notif_cb, completion, - epd->bInterval); - return usb_submit_urb(urb, GFP_KERNEL); -} - - -/* - * Read an ack from the notification endpoint - * - * @i2400m: - * @_ack: pointer to where to store the read data - * @ack_size: how many bytes we should read - * - * Returns: < 0 errno code on error; otherwise, amount of received bytes. - * - * Submits a notification read, appends the read data to the given ack - * buffer and then repeats (until @ack_size bytes have been - * received). - */ -ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m, - struct i2400m_bootrom_header *_ack, - size_t ack_size) -{ - ssize_t result = -ENOMEM; - struct device *dev = i2400m_dev(i2400m); - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - struct urb notif_urb; - void *ack = _ack; - size_t offset, len; - long val; - int do_autopm = 1; - DECLARE_COMPLETION_ONSTACK(notif_completion); - - d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n", - i2400m, ack, ack_size); - BUG_ON(_ack == i2400m->bm_ack_buf); - result = usb_autopm_get_interface(i2400mu->usb_iface); - if (result < 0) { - dev_err(dev, "BM-ACK: can't get autopm: %d\n", (int) result); - do_autopm = 0; - } - usb_init_urb(¬if_urb); /* ready notifications */ - usb_get_urb(¬if_urb); - offset = 0; - while (offset < ack_size) { - init_completion(¬if_completion); - result = i2400mu_notif_submit(i2400mu, ¬if_urb, - ¬if_completion); - if (result < 0) - goto error_notif_urb_submit; - val = wait_for_completion_interruptible_timeout( - ¬if_completion, HZ); - if (val == 0) { - result = -ETIMEDOUT; - usb_kill_urb(¬if_urb); /* Timedout */ - goto error_notif_wait; - } - if (val == -ERESTARTSYS) { - result = -EINTR; /* Interrupted */ - usb_kill_urb(¬if_urb); - goto error_notif_wait; - } - result = notif_urb.status; /* How was the ack? */ - switch (result) { - case 0: - break; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* just ignore it */ - case -ESHUTDOWN: /* and exit */ - case -ECONNRESET: - result = -ESHUTDOWN; - goto error_dev_gone; - default: /* any other? 
*/ - usb_kill_urb(¬if_urb); /* Timedout */ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) - goto error_exceeded; - dev_err(dev, "BM-ACK: URB error %d, " - "retrying\n", notif_urb.status); - continue; /* retry */ - } - if (notif_urb.actual_length == 0) { - d_printf(6, dev, "ZLP received, retrying\n"); - continue; - } - /* Got data, append it to the buffer */ - len = min(ack_size - offset, (size_t) notif_urb.actual_length); - memcpy(ack + offset, i2400m->bm_ack_buf, len); - offset += len; - } - result = offset; -error_notif_urb_submit: -error_notif_wait: -error_dev_gone: -out: - if (do_autopm) - usb_autopm_put_interface(i2400mu->usb_iface); - d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n", - i2400m, ack, ack_size, (long) result); - usb_put_urb(¬if_urb); - return result; - -error_exceeded: - dev_err(dev, "bm: maximum errors in notification URB exceeded; " - "resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - goto out; -} diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c deleted file mode 100644 index 5d429f816125..000000000000 --- a/drivers/net/wimax/i2400m/usb-notif.c +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m over USB - * Notification handling - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Initial implementation - * - * - * The notification endpoint is active when the device is not in boot - * mode; in here we just read and get notifications; based on those, - * we act to either reinitialize the device after a reboot or to - * submit a RX request. 
- * - * ROADMAP - * - * i2400mu_usb_notification_setup() - * - * i2400mu_usb_notification_release() - * - * i2400mu_usb_notification_cb() Called when a URB is ready - * i2400mu_notif_grok() - * i2400m_is_boot_barker() - * i2400m_dev_reset_handle() - * i2400mu_rx_kick() - */ -#include <linux/usb.h> -#include <linux/slab.h> -#include "i2400m-usb.h" - - -#define D_SUBMODULE notif -#include "usb-debug-levels.h" - - -static const -__le32 i2400m_ZERO_BARKER[4] = { 0, 0, 0, 0 }; - - -/* - * Process a received notification - * - * In normal operation mode, we can only receive two types of payloads - * on the notification endpoint: - * - * - a reboot barker: we do a bootstrap (the device has reset). - * - * - a block of zeroes: there is pending data in the IN endpoint - */ -static -int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf, - size_t buf_len) -{ - int ret; - struct device *dev = &i2400mu->usb_iface->dev; - struct i2400m *i2400m = &i2400mu->i2400m; - - d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n", - i2400mu, buf, buf_len); - ret = -EIO; - if (buf_len < sizeof(i2400m_ZERO_BARKER)) - /* Not a bug, just ignore */ - goto error_bad_size; - ret = 0; - if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) { - i2400mu_rx_kick(i2400mu); - goto out; - } - ret = i2400m_is_boot_barker(i2400m, buf, buf_len); - if (unlikely(ret >= 0)) - ret = i2400m_dev_reset_handle(i2400m, "device rebooted"); - else /* Unknown or unexpected data in the notif message */ - i2400m_unknown_barker(i2400m, buf, buf_len); -error_bad_size: -out: - d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n", - i2400mu, buf, buf_len, ret); - return ret; -} - - -/* - * URB callback for the notification endpoint - * - * @urb: the urb received from the notification endpoint - * - * This function will just process the USB side of the transaction, - * checking everything is fine, passing the processing to - * i2400mu_notification_grok() and resubmitting the URB. - */ -static -void i2400mu_notification_cb(struct urb *urb) -{ - int ret; - struct i2400mu *i2400mu = urb->context; - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(4, dev, "(urb %p status %d actual_length %d)\n", - urb, urb->status, urb->actual_length); - ret = urb->status; - switch (ret) { - case 0: - ret = i2400mu_notification_grok(i2400mu, urb->transfer_buffer, - urb->actual_length); - if (ret == -EIO && edc_inc(&i2400mu->urb_edc, EDC_MAX_ERRORS, - EDC_ERROR_TIMEFRAME)) - goto error_exceeded; - if (ret == -ENOMEM) /* uff...power cycle? shutdown? */ - goto error_exceeded; - break; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* ditto */ - case -ESHUTDOWN: /* URB killed */ - case -ECONNRESET: /* disconnection */ - goto out; /* Notify around */ - default: /* Some error? */ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) - goto error_exceeded; - dev_err(dev, "notification: URB error %d, retrying\n", - urb->status); - } - usb_mark_last_busy(i2400mu->usb_dev); - ret = usb_submit_urb(i2400mu->notif_urb, GFP_ATOMIC); - switch (ret) { - case 0: - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* ditto */ - case -ESHUTDOWN: /* URB killed */ - case -ECONNRESET: /* disconnection */ - break; /* just ignore */ - default: /* Some error?
*/ - dev_err(dev, "notification: cannot submit URB: %d\n", ret); - goto error_submit; - } - d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", - urb, urb->status, urb->actual_length); - return; - -error_exceeded: - dev_err(dev, "maximum errors in notification URB exceeded; " - "resetting device\n"); -error_submit: - usb_queue_reset_device(i2400mu->usb_iface); -out: - d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", - urb, urb->status, urb->actual_length); -} - - -/* - * setup the notification endpoint - * - * @i2400m: device descriptor - * - * This procedure prepares the notification urb and handler for receiving - * unsolicited barkers from the device. - */ -int i2400mu_notification_setup(struct i2400mu *i2400mu) -{ - struct device *dev = &i2400mu->usb_iface->dev; - int usb_pipe, ret = 0; - struct usb_endpoint_descriptor *epd; - char *buf; - - d_fnstart(4, dev, "(i2400m %p)\n", i2400mu); - buf = kmalloc(I2400MU_MAX_NOTIFICATION_LEN, GFP_KERNEL | GFP_DMA); - if (buf == NULL) { - ret = -ENOMEM; - goto error_buf_alloc; - } - - i2400mu->notif_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!i2400mu->notif_urb) { - ret = -ENOMEM; - goto error_alloc_urb; - } - epd = usb_get_epd(i2400mu->usb_iface, - i2400mu->endpoint_cfg.notification); - usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); - usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe, - buf, I2400MU_MAX_NOTIFICATION_LEN, - i2400mu_notification_cb, i2400mu, epd->bInterval); - ret = usb_submit_urb(i2400mu->notif_urb, GFP_KERNEL); - if (ret != 0) { - dev_err(dev, "notification: cannot submit URB: %d\n", ret); - goto error_submit; - } - d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); - return ret; - -error_submit: - usb_free_urb(i2400mu->notif_urb); -error_alloc_urb: - kfree(buf); -error_buf_alloc: - d_fnend(4, dev, "(i2400m %p) = %d\n", i2400mu, ret); - return ret; -} - - -/* - * Tear down of the notification mechanism - * - * @i2400m: device descriptor - * - * Kill the interrupt endpoint urb, free any allocated resources. - * - * We need to check if we have done it before as for example, - * _suspend() call this; if after a suspend() we get a _disconnect() - * (as the case is when hibernating), nothing bad happens. - */ -void i2400mu_notification_release(struct i2400mu *i2400mu) -{ - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); - if (i2400mu->notif_urb != NULL) { - usb_kill_urb(i2400mu->notif_urb); - kfree(i2400mu->notif_urb->transfer_buffer); - usb_free_urb(i2400mu->notif_urb); - i2400mu->notif_urb = NULL; - } - d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); -} diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c deleted file mode 100644 index 5b64bda7d9e7..000000000000 --- a/drivers/net/wimax/i2400m/usb-rx.c +++ /dev/null @@ -1,462 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * USB RX handling - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - Initial implementation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Use skb_clone(), break up processing in chunks - * - Split transport/device specific - * - Make buffer size dynamic to exert less memory pressure - * - * - * This handles the RX path on USB. - * - * When a notification is received that says 'there is RX data ready', - * we call i2400mu_rx_kick(); that wakes up the RX kthread, which - * reads a buffer from USB and passes it to i2400m_rx() in the generic - * handling code. The RX buffer has a specific format that is - * described in rx.c. - * - * We use a kernel thread in a loop because: - * - * - we want to be able to call the USB power management get/put - * functions (blocking) before each transaction. - * - * - We might get a lot of notifications and we don't want to submit - * a zillion reads; by serializing, we are throttling. - * - * - RX data processing can get heavy enough that it is not - * appropriate to do in the USB callback; thus we run it in a - * process context. - * - * We provide a read buffer of an arbitrary size (short of a page); if - * the callback reports -EOVERFLOW, it means it was too small, so we - * just double the size and retry (being careful to append, as - * sometimes the device provided some data). Every now and then we - * check if the average packet size is smaller than the current packet - * size and if so, we halve it. At the end, the size of the - * preallocated buffer should follow the average received - * transaction size, adapting dynamically to it. - * - * ROADMAP - * - * i2400mu_rx_kick() Called from notif.c when we get a - * 'data ready' notification - * i2400mu_rxd() Kernel RX daemon - * i2400mu_rx() Receive USB data - * i2400m_rx() Send data to generic i2400m RX handling - * - * i2400mu_rx_setup() called from i2400mu_bus_dev_start() - * - * i2400mu_rx_release() called from i2400mu_bus_dev_stop() - */ -#include <linux/workqueue.h> -#include <linux/slab.h> -#include <linux/usb.h> -#include "i2400m-usb.h" - - -#define D_SUBMODULE rx -#include "usb-debug-levels.h" - -/* - * Dynamic RX size - * - * We can't let the rx_size be a multiple of 512 bytes (the RX - * endpoint's max packet size).
On some USB host controllers (we - * haven't been able to fully characterize which), if the device is - * about to send (for example) X bytes and we only post a buffer to - * receive n*512, it will fail to mark that as babble (so that - * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the - * rest). - * - * So on growing or shrinking, if it is a multiple of the - * maxpacketsize, we remove some (instead of increasing some, so in a - * buddy allocator we try to waste less space). - * - * Note we also need a hook for this on i2400mu_rx() -- when we do the - * first read, we are sure we won't hit this spot because - * i2400mu->rx_size has been set properly. However, if we have to - * double because of -EOVERFLOW, when we launch the read to get the - * rest of the data, we *have* to make sure that also is not a - * multiple of the max_pkt_size. - */ - -static -size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu) -{ - struct device *dev = &i2400mu->usb_iface->dev; - size_t rx_size; - const size_t max_pkt_size = 512; - - rx_size = 2 * i2400mu->rx_size; - if (rx_size % max_pkt_size == 0) { - rx_size -= 8; - d_printf(1, dev, - "RX: expected size grew to %zu [adjusted -8] " - "from %zu\n", - rx_size, i2400mu->rx_size); - } else - d_printf(1, dev, - "RX: expected size grew to %zu from %zu\n", - rx_size, i2400mu->rx_size); - return rx_size; -} - - -static -void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu) -{ - const size_t max_pkt_size = 512; - struct device *dev = &i2400mu->usb_iface->dev; - - if (unlikely(i2400mu->rx_size_cnt >= 100 - && i2400mu->rx_size_auto_shrink)) { - size_t avg_rx_size = - i2400mu->rx_size_acc / i2400mu->rx_size_cnt; - size_t new_rx_size = i2400mu->rx_size / 2; - if (avg_rx_size < new_rx_size) { - if (new_rx_size % max_pkt_size == 0) { - new_rx_size -= 8; - d_printf(1, dev, - "RX: expected size shrank to %zu " - "[adjusted -8] from %zu\n", - new_rx_size, i2400mu->rx_size); - } else - d_printf(1, dev, - "RX: expected size shrank to %zu " - "from %zu\n", - new_rx_size, i2400mu->rx_size); - i2400mu->rx_size = new_rx_size; - i2400mu->rx_size_cnt = 0; - i2400mu->rx_size_acc = i2400mu->rx_size; - } - } -} - -/* - * Receive a message with payloads from the USB bus into an skb - * - * @i2400mu: USB device descriptor - * @rx_skb: skb where to place the received message - * - * Deals with all the USB-specifics of receiving, dynamically - * increasing the buffer size if so needed. Returns the payload in the - * skb, ready to process. On a zero-length packet, we retry. - * - * On soft USB errors, we retry (until they become too frequent and - * then are promoted to hard); on hard USB errors, we reset the - * device. On other errors (e.g. an skb reallocation failure), we just - * drop it and hope for the next invocation to solve it. - * - * Returns: pointer to the skb if ok, ERR_PTR on error. - * NOTE: this function might realloc the skb (if it is too small), - * so always update with the one returned. - * ERR_PTR() is < 0 on error. - * Will return NULL if it cannot reallocate -- this can be - * considered a transient retryable error. - */ -static -struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb) -{ - int result = 0; - struct device *dev = &i2400mu->usb_iface->dev; - int usb_pipe, read_size, rx_size, do_autopm; - struct usb_endpoint_descriptor *epd; - const size_t max_pkt_size = 512; - - d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); - do_autopm = atomic_read(&i2400mu->do_autopm); - result = do_autopm ?
- usb_autopm_get_interface(i2400mu->usb_iface) : 0; - if (result < 0) { - dev_err(dev, "RX: can't get autopm: %d\n", result); - do_autopm = 0; - } - epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in); - usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); -retry: - rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len; - if (unlikely(rx_size % max_pkt_size == 0)) { - rx_size -= 8; - d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size); - } - result = usb_bulk_msg( - i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len, - rx_size, &read_size, 200); - usb_mark_last_busy(i2400mu->usb_dev); - switch (result) { - case 0: - if (read_size == 0) - goto retry; /* ZLP, just resubmit */ - skb_put(rx_skb, read_size); - break; - case -EPIPE: - /* - * Stall -- maybe the device is choking with our - * requests. Clear it and give it some time. If they - * happen too often, it might be another symptom, so we - * reset. - * - * No error handling for usb_clear_halt(); if it - * works, the retry works; if it fails, this switch - * does the error handling for us. - */ - if (edc_inc(&i2400mu->urb_edc, - 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "BM-CMD: too many stalls in " - "URB; resetting device\n"); - goto do_reset; - } - usb_clear_halt(i2400mu->usb_dev, usb_pipe); - msleep(10); /* give the device some time */ - goto retry; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* just ignore it */ - case -ESHUTDOWN: - case -ECONNRESET: - break; - case -EOVERFLOW: { /* too small, reallocate */ - struct sk_buff *new_skb; - rx_size = i2400mu_rx_size_grow(i2400mu); - if (rx_size <= (1 << 16)) /* cap it */ - i2400mu->rx_size = rx_size; - else if (printk_ratelimit()) { - dev_err(dev, "BUG? rx_size up to %d\n", rx_size); - result = -EINVAL; - goto out; - } - skb_put(rx_skb, read_size); - new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len, - GFP_KERNEL); - if (new_skb == NULL) { - kfree_skb(rx_skb); - rx_skb = NULL; - goto out; /* drop it...*/ - } - kfree_skb(rx_skb); - rx_skb = new_skb; - i2400mu->rx_size_cnt = 0; - i2400mu->rx_size_acc = i2400mu->rx_size; - d_printf(1, dev, "RX: size changed to %d, received %d, " - "copied %d, capacity %ld\n", - rx_size, read_size, rx_skb->len, - (long) skb_end_offset(new_skb)); - goto retry; - } - /* In most cases, it happens due to the hardware scheduling a - * read when there was no data - unfortunately, we have no way - * to tell this timeout from a USB timeout. So we just ignore - * it. */ - case -ETIMEDOUT: - dev_err(dev, "RX: timeout: %d\n", result); - result = 0; - break; - default: /* Any error */ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) - goto error_reset; - dev_err(dev, "RX: error receiving URB: %d, retrying\n", result); - goto retry; - } -out: - if (do_autopm) - usb_autopm_put_interface(i2400mu->usb_iface); - d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb); - return rx_skb; - -error_reset: - dev_err(dev, "RX: maximum errors in URB exceeded; " - "resetting device\n"); -do_reset: - usb_queue_reset_device(i2400mu->usb_iface); - rx_skb = ERR_PTR(result); - goto out; -} - - -/* - * Kernel thread for USB reception of data - * - * This thread waits for a kick; once kicked, it will allocate an skb - * and receive a single message to it from USB (using - * i2400mu_rx()). Once received, it is passed to the generic i2400m RX - * code for processing.
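The buffer-sizing rule the removed code above implements is easy to state in isolation: grow by doubling on -EOVERFLOW, and never post a read whose length is a multiple of the endpoint's 512-byte max packet size, or some host controllers fail to flag babble. A minimal sketch of just that rule, assuming a fixed 512-byte wMaxPacketSize; EP_MAX_PKT and rx_size_grow are illustrative names, not driver symbols:

#include <linux/types.h>

#define EP_MAX_PKT 512	/* assumed bulk-in wMaxPacketSize */

/* Double the RX buffer, staying off n*512 so an overlong transfer
 * is still reported as babble (-EOVERFLOW) and can trigger a regrow. */
static size_t rx_size_grow(size_t cur)
{
	size_t next = 2 * cur;

	if (next % EP_MAX_PKT == 0)
		next -= 8;	/* nudge off the max-packet boundary */
	return next;
}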
- * - * When done processing, it runs some dirty statistics to verify if - * the last 100 messages received were smaller than half of the - * current RX buffer size. In that case, the RX buffer size is - * halved. This helps lower the pressure on the memory - * allocator. - * - * Hard errors force the thread to exit. - */ -static -int i2400mu_rxd(void *_i2400mu) -{ - int result = 0; - struct i2400mu *i2400mu = _i2400mu; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - size_t pending; - int rx_size; - struct sk_buff *rx_skb; - unsigned long flags; - - d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); - spin_lock_irqsave(&i2400m->rx_lock, flags); - BUG_ON(i2400mu->rx_kthread != NULL); - i2400mu->rx_kthread = current; - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - while (1) { - d_printf(2, dev, "RX: waiting for messages\n"); - pending = 0; - wait_event_interruptible( - i2400mu->rx_wq, - (kthread_should_stop() /* check this first! */ - || (pending = atomic_read(&i2400mu->rx_pending_count))) - ); - if (kthread_should_stop()) - break; - if (pending == 0) - continue; - rx_size = i2400mu->rx_size; - d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size); - rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL); - if (rx_skb == NULL) { - dev_err(dev, "RX: can't allocate skb [%d bytes]\n", - rx_size); - msleep(50); /* give it some time? */ - continue; - } - - /* Receive the message with the payloads */ - rx_skb = i2400mu_rx(i2400mu, rx_skb); - result = PTR_ERR(rx_skb); - if (IS_ERR(rx_skb)) - goto out; - atomic_dec(&i2400mu->rx_pending_count); - if (rx_skb == NULL || rx_skb->len == 0) { - /* some "ignorable" condition */ - kfree_skb(rx_skb); - continue; - } - - /* Deliver the message to the generic i2400m code */ - i2400mu->rx_size_cnt++; - i2400mu->rx_size_acc += rx_skb->len; - result = i2400m_rx(i2400m, rx_skb); - if (result == -EIO - && edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - goto error_reset; - } - - /* Maybe adjust RX buffer size */ - i2400mu_rx_size_maybe_shrink(i2400mu); - } - result = 0; -out: - spin_lock_irqsave(&i2400m->rx_lock, flags); - i2400mu->rx_kthread = NULL; - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); - return result; - -error_reset: - dev_err(dev, "RX: maximum errors in received buffer exceeded; " - "resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - goto out; -} - - -/* - * Start reading from the device - * - * @i2400m: device instance - * - * Notify the RX thread that there is data pending.
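The kick/wait machinery in the deleted thread above is a standard kthread pattern: an atomic pending counter plus a waitqueue, with kthread_should_stop() checked before the counter so a stop request is never lost. A self-contained sketch of that pattern; rx_pump, rx_kick and the one-message stub are illustrative, not driver symbols:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(rx_wq);		/* the driver keeps these */
static atomic_t rx_pending = ATOMIC_INIT(0);	/* in its device struct  */

static int rx_pump(void *unused)
{
	while (1) {
		wait_event_interruptible(rx_wq,
			kthread_should_stop() ||	/* check this first! */
			atomic_read(&rx_pending));
		if (kthread_should_stop())
			break;
		if (atomic_read(&rx_pending) == 0)
			continue;			/* spurious wakeup */
		atomic_dec(&rx_pending);
		/* ... read one message and hand it to the generic layer ... */
	}
	return 0;
}

/* Called from the notification path when the device says data is ready. */
static void rx_kick(void)
{
	atomic_inc(&rx_pending);
	wake_up_all(&rx_wq);
}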
- */ -void i2400mu_rx_kick(struct i2400mu *i2400mu) -{ - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(3, dev, "(i2400mu %p)\n", i2400m); - atomic_inc(&i2400mu->rx_pending_count); - wake_up_all(&i2400mu->rx_wq); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} - - -int i2400mu_rx_setup(struct i2400mu *i2400mu) -{ - int result = 0; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct task_struct *kthread; - - kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx", - wimax_dev->name); - /* the kthread function sets i2400mu->rx_thread */ - if (IS_ERR(kthread)) { - result = PTR_ERR(kthread); - dev_err(dev, "RX: cannot start thread: %d\n", result); - } - return result; -} - - -void i2400mu_rx_release(struct i2400mu *i2400mu) -{ - unsigned long flags; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = i2400m_dev(i2400m); - struct task_struct *kthread; - - spin_lock_irqsave(&i2400m->rx_lock, flags); - kthread = i2400mu->rx_kthread; - i2400mu->rx_kthread = NULL; - spin_unlock_irqrestore(&i2400m->rx_lock, flags); - if (kthread) - kthread_stop(kthread); - else - d_printf(1, dev, "RX: kthread had already exited\n"); -} - diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c deleted file mode 100644 index 3ba9d70cca1b..000000000000 --- a/drivers/net/wimax/i2400m/usb-tx.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Intel Wireless WiMAX Connection 2400m - * USB specific TX handling - * - * - * Copyright (C) 2007-2008 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * - * Intel Corporation <linux-wimax@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - Initial implementation - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * - Split transport/device specific - * - * - * Takes the TX messages in the i2400m's driver TX FIFO and sends them - * to the device until there are no more. 
- * - * If we fail sending the message, we just drop it. There isn't much - * we can do at this point. We could also retry, but the USB stack has - * already retried and still failed, so there is not much of a - * point. As well, most of the traffic is network, which has recovery - * methods for dropped packets. - * - * For sending we just obtain a FIFO buffer to send, send it to the - * USB bulk out, tell the TX FIFO code we have sent it; query for - * another one, etc... until done. - * - * We use a thread so we can call usb_autopm_enable() and - * usb_autopm_disable() for each transaction; this way when the device - * goes idle, it will suspend. It also has less overhead than a - * dedicated workqueue, as it is being used for a single task. - * - * ROADMAP - * - * i2400mu_tx_setup() - * i2400mu_tx_release() - * - * i2400mu_bus_tx_kick() - Called by the tx.c code when there - * is new data in the FIFO. - * i2400mu_txd() - * i2400m_tx_msg_get() - * i2400m_tx_msg_sent() - */ -#include "i2400m-usb.h" - - -#define D_SUBMODULE tx -#include "usb-debug-levels.h" - - -/* - * Get the next TX message in the TX FIFO and send it to the device - * - * Note that any iteration consumes a message to be sent, no matter if - * it succeeds or fails (we have no real way to retry or complain). - * - * Return: 0 if ok, < 0 errno code on hard error. - */ -static -int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg, - size_t tx_msg_size) -{ - int result = 0; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - int usb_pipe, sent_size, do_autopm; - struct usb_endpoint_descriptor *epd; - - d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); - do_autopm = atomic_read(&i2400mu->do_autopm); - result = do_autopm ? - usb_autopm_get_interface(i2400mu->usb_iface) : 0; - if (result < 0) { - dev_err(dev, "TX: can't get autopm: %d\n", result); - do_autopm = 0; - } - epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out); - usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); -retry: - result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe, - tx_msg, tx_msg_size, &sent_size, 200); - usb_mark_last_busy(i2400mu->usb_dev); - switch (result) { - case 0: - if (sent_size != tx_msg_size) { /* Too short? drop it */ - dev_err(dev, "TX: short write (%d B vs %zu " - "expected)\n", sent_size, tx_msg_size); - result = -EIO; - } - break; - case -EPIPE: - /* - * Stall -- maybe the device is choking with our - * requests. Clear it and give it some time. If they - * happen too often, it might be another symptom, so we - * reset. - * - * No error handling for usb_clear_halt(); if it - * works, the retry works; if it fails, this switch - * does the error handling for us. - */ - if (edc_inc(&i2400mu->urb_edc, - 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "BM-CMD: too many stalls in " - "URB; resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - } else { - usb_clear_halt(i2400mu->usb_dev, usb_pipe); - msleep(10); /* give the device some time */ - goto retry; - } - fallthrough; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* just ignore it */ - case -ESHUTDOWN: /* and exit */ - case -ECONNRESET: - result = -ESHUTDOWN; - break; - default: /* Some error?
*/ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "TX: maximum errors in URB " - "exceeded; resetting device\n"); - usb_queue_reset_device(i2400mu->usb_iface); - } else { - dev_err(dev, "TX: cannot send URB; retrying. " - "tx_msg @%zu %zu B [%d sent]: %d\n", - (void *) tx_msg - i2400m->tx_buf, - tx_msg_size, sent_size, result); - goto retry; - } - } - if (do_autopm) - usb_autopm_put_interface(i2400mu->usb_iface); - d_fnend(4, dev, "(i2400mu %p) = result\n", i2400mu); - return result; -} - - -/* - * Get the next TX message in the TX FIFO and send it to the device - * - * Note we exit the loop if i2400mu_tx() fails; that function only - * fails on hard error (failing to tx a buffer not being one of them, - * see its doc). - * - * Return: 0 - */ -static -int i2400mu_txd(void *_i2400mu) -{ - struct i2400mu *i2400mu = _i2400mu; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - struct i2400m_msg_hdr *tx_msg; - size_t tx_msg_size; - unsigned long flags; - - d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); - - spin_lock_irqsave(&i2400m->tx_lock, flags); - BUG_ON(i2400mu->tx_kthread != NULL); - i2400mu->tx_kthread = current; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - - while (1) { - d_printf(2, dev, "TX: waiting for messages\n"); - tx_msg = NULL; - wait_event_interruptible( - i2400mu->tx_wq, - (kthread_should_stop() /* check this first! */ - || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) - ); - if (kthread_should_stop()) - break; - WARN_ON(tx_msg == NULL); /* should not happen...*/ - d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size); - d_dump(5, dev, tx_msg, tx_msg_size); - /* Yeah, we ignore errors ... not much we can do */ - i2400mu_tx(i2400mu, tx_msg, tx_msg_size); - i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */ - } - - spin_lock_irqsave(&i2400m->tx_lock, flags); - i2400mu->tx_kthread = NULL; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - - d_fnend(4, dev, "(i2400mu %p)\n", i2400mu); - return 0; -} - - -/* - * i2400m TX engine notifies us that there is data in the FIFO ready - * for TX - * - * If there is a URB in flight, don't do anything; when it finishes, - * it will see there is data in the FIFO and send it. Else, just - * submit a write. 
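The -EPIPE arm of the switches above is the part worth lifting out: clear the halt, give the device a moment, retry, and escalate to a device reset only when stalls cluster. A hedged sketch of that recovery loop, with a plain retry budget standing in for the driver's error-density counter (edc_inc()); bulk_out_with_stall_recovery is an illustrative name:

#include <linux/usb.h>
#include <linux/delay.h>

static int bulk_out_with_stall_recovery(struct usb_device *udev, int pipe,
					void *buf, int len, int *sent,
					int max_stalls)
{
	int ret, stalls = 0;

retry:
	ret = usb_bulk_msg(udev, pipe, buf, len, sent, 200 /* ms */);
	if (ret == -EPIPE && ++stalls < max_stalls) {
		usb_clear_halt(udev, pipe);	/* failure shows up on retry */
		msleep(10);			/* give the device some time */
		goto retry;
	}
	return ret;	/* caller resets the device on persistent errors */
}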
- */ -void i2400mu_bus_tx_kick(struct i2400m *i2400m) -{ - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m); - wake_up_all(&i2400mu->tx_wq); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} - - -int i2400mu_tx_setup(struct i2400mu *i2400mu) -{ - int result = 0; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = &i2400mu->usb_iface->dev; - struct wimax_dev *wimax_dev = &i2400m->wimax_dev; - struct task_struct *kthread; - - kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx", - wimax_dev->name); - /* the kthread function sets i2400mu->tx_thread */ - if (IS_ERR(kthread)) { - result = PTR_ERR(kthread); - dev_err(dev, "TX: cannot start thread: %d\n", result); - } - return result; -} - -void i2400mu_tx_release(struct i2400mu *i2400mu) -{ - unsigned long flags; - struct i2400m *i2400m = &i2400mu->i2400m; - struct device *dev = i2400m_dev(i2400m); - struct task_struct *kthread; - - spin_lock_irqsave(&i2400m->tx_lock, flags); - kthread = i2400mu->tx_kthread; - i2400mu->tx_kthread = NULL; - spin_unlock_irqrestore(&i2400m->tx_lock, flags); - if (kthread) - kthread_stop(kthread); - else - d_printf(1, dev, "TX: kthread had already exited\n"); -} diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c deleted file mode 100644 index b684e97ac976..000000000000 --- a/drivers/net/wimax/i2400m/usb.c +++ /dev/null @@ -1,764 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Intel Wireless WiMAX Connection 2400m - * Linux driver model glue for USB device, reset & fw upload - * - * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com> - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> - * Yanir Lubetkin <yanirx.lubetkin@intel.com> - * - * See i2400m-usb.h for a general description of this driver. - * - * This file implements driver model glue, and hook ups for the - * generic driver to implement the bus-specific functions (device - * communication setup/tear down, firmware upload and resetting). - * - * ROADMAP - * - * i2400mu_probe() - * alloc_netdev()... - * i2400mu_netdev_setup() - * i2400mu_init() - * i2400m_netdev_setup() - * i2400m_setup()... 
- * - * i2400mu_disconnect - * i2400m_release() - * free_netdev() - * - * i2400mu_suspend() - * i2400m_cmd_enter_powersave() - * i2400mu_notification_release() - * - * i2400mu_resume() - * i2400mu_notification_setup() - * - * i2400mu_bus_dev_start() Called by i2400m_dev_start() [who is - * i2400mu_tx_setup() called by i2400m_setup()] - * i2400mu_rx_setup() - * i2400mu_notification_setup() - * - * i2400mu_bus_dev_stop() Called by i2400m_dev_stop() [who is - * i2400mu_notification_release() called by i2400m_release()] - * i2400mu_rx_release() - * i2400mu_tx_release() - * - * i2400mu_bus_reset() Called by i2400m_reset - * __i2400mu_reset() - * __i2400mu_send_barker() - * usb_reset_device() - */ -#include "i2400m-usb.h" -#include <linux/wimax/i2400m.h> -#include <linux/debugfs.h> -#include <linux/slab.h> -#include <linux/module.h> - - -#define D_SUBMODULE usb -#include "usb-debug-levels.h" - -static char i2400mu_debug_params[128]; -module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params), - 0644); -MODULE_PARM_DESC(debug, - "String of space-separated NAME:VALUE pairs, where NAMEs " - "are the different debug submodules and VALUE are the " - "initial debug value to set."); - -/* Our firmware file name */ -static const char *i2400mu_bus_fw_names_5x50[] = { -#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf" - I2400MU_FW_FILE_NAME_v1_5, -#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf" - I2400MU_FW_FILE_NAME_v1_4, - NULL, -}; - - -static const char *i2400mu_bus_fw_names_6050[] = { -#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf" - I6050U_FW_FILE_NAME_v1_5, - NULL, -}; - - -static -int i2400mu_bus_dev_start(struct i2400m *i2400m) -{ - int result; - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - result = i2400mu_tx_setup(i2400mu); - if (result < 0) - goto error_usb_tx_setup; - result = i2400mu_rx_setup(i2400mu); - if (result < 0) - goto error_usb_rx_setup; - result = i2400mu_notification_setup(i2400mu); - if (result < 0) - goto error_notif_setup; - d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); - return result; - -error_notif_setup: - i2400mu_rx_release(i2400mu); -error_usb_rx_setup: - i2400mu_tx_release(i2400mu); -error_usb_tx_setup: - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); - return result; -} - - -static -void i2400mu_bus_dev_stop(struct i2400m *i2400m) -{ - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - struct device *dev = &i2400mu->usb_iface->dev; - - d_fnstart(3, dev, "(i2400m %p)\n", i2400m); - i2400mu_notification_release(i2400mu); - i2400mu_rx_release(i2400mu); - i2400mu_tx_release(i2400mu); - d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); -} - - -/* - * Sends a barker buffer to the device - * - * This helper will allocate a kmalloced buffer and use it to transmit - * (then free it). Reason for this is that other arches cannot use - * stack/vmalloc/text areas for DMA transfers. - * - * Error recovery here is simpler: anything is considered a hard error - * and will move the reset code to use a last-resort bus-based reset. 
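What the barker comment above boils down to: the payload must round-trip through a kmalloc()ed buffer before usb_bulk_msg(), because stack, vmalloc or static storage is not guaranteed to be DMA-able on every architecture. A minimal sketch of just that point; send_words is an illustrative wrapper, not a driver symbol:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int send_words(struct usb_device *udev, int pipe,
		      const __le32 *words, int size)
{
	int sent, ret;
	void *buf = kmalloc(size, GFP_KERNEL);	/* DMA-safe staging buffer */

	if (!buf)
		return -ENOMEM;
	memcpy(buf, words, size);
	ret = usb_bulk_msg(udev, pipe, buf, size, &sent, 200);
	if (ret == 0 && sent != size)
		ret = -EIO;			/* short write: treat as hard */
	kfree(buf);
	return ret;
}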
- */ -static -int __i2400mu_send_barker(struct i2400mu *i2400mu, - const __le32 *barker, - size_t barker_size, - unsigned endpoint) -{ - struct usb_endpoint_descriptor *epd = NULL; - int pipe, actual_len, ret; - struct device *dev = &i2400mu->usb_iface->dev; - void *buffer; - int do_autopm = 1; - - ret = usb_autopm_get_interface(i2400mu->usb_iface); - if (ret < 0) { - dev_err(dev, "RESET: can't get autopm: %d\n", ret); - do_autopm = 0; - } - ret = -ENOMEM; - buffer = kmalloc(barker_size, GFP_KERNEL); - if (buffer == NULL) - goto error_kzalloc; - epd = usb_get_epd(i2400mu->usb_iface, endpoint); - pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); - memcpy(buffer, barker, barker_size); -retry: - ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size, - &actual_len, 200); - switch (ret) { - case 0: - if (actual_len != barker_size) { /* Too short? drop it */ - dev_err(dev, "E: %s: short write (%d B vs %zu " - "expected)\n", - __func__, actual_len, barker_size); - ret = -EIO; - } - break; - case -EPIPE: - /* - * Stall -- maybe the device is choking with our - * requests. Clear it and give it some time. If they - * happen too often, it might be another symptom, so we - * reset. - * - * No error handling for usb_clear_halt(); if it - * works, the retry works; if it fails, this switch - * does the error handling for us. - */ - if (edc_inc(&i2400mu->urb_edc, - 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "E: %s: too many stalls in " - "URB; resetting device\n", __func__); - usb_queue_reset_device(i2400mu->usb_iface); - /* fallthrough */ - } else { - usb_clear_halt(i2400mu->usb_dev, pipe); - msleep(10); /* give the device some time */ - goto retry; - } - fallthrough; - case -EINVAL: /* while removing driver */ - case -ENODEV: /* dev disconnect ... */ - case -ENOENT: /* just ignore it */ - case -ESHUTDOWN: /* and exit */ - case -ECONNRESET: - ret = -ESHUTDOWN; - break; - default: /* Some error? */ - if (edc_inc(&i2400mu->urb_edc, - EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { - dev_err(dev, "E: %s: maximum errors in URB " - "exceeded; resetting device\n", - __func__); - usb_queue_reset_device(i2400mu->usb_iface); - } else { - dev_warn(dev, "W: %s: cannot send URB: %d\n", - __func__, ret); - goto retry; - } - } - kfree(buffer); -error_kzalloc: - if (do_autopm) - usb_autopm_put_interface(i2400mu->usb_iface); - return ret; -} - - -/* - * Reset a device at different levels (warm, cold or bus) - * - * @i2400m: device descriptor - * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS) - * - * Warm and cold resets get a USB reset if they fail. - * - * Warm reset: - * - * The device will be fully reset internally, but won't be - * disconnected from the USB bus (so no reenumeration will - * happen). Firmware upload will be necessary. - * - * The device will send a reboot barker in the notification endpoint - * that will trigger the driver to reinitialize the state - * automatically from notif.c:i2400m_notification_grok() into - * i2400m_dev_bootstrap_delayed(). - * - * Cold and bus (USB) reset: - * - * The device will be fully reset internally, disconnected from the - * USB bus and a reenumeration will happen. Firmware upload will be - * necessary. Thus, we don't do any locking or struct - * reinitialization, as we are going to be fully disconnected and - * reenumerated. - * - * Note we need to return -ENODEV if a warm reset was requested and we - * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset() - * and wimax_dev->op_reset.
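The contract that comment spells out fits in a few lines: a failed warm or cold reset degrades to a queued bus reset and reports -ENODEV so the caller knows a reenumeration is coming, except when the device is already gone. A sketch of only that decision; reset_with_fallback is illustrative, not a driver symbol:

#include <linux/errno.h>
#include <linux/usb.h>

static int reset_with_fallback(struct usb_interface *iface, int barker_err)
{
	if (barker_err >= 0)
		return 0;		/* barker accepted; device resets itself */
	if (barker_err == -EINVAL)
		return barker_err;	/* device already gone, nothing to queue */
	usb_queue_reset_device(iface);	/* runs from another context, since the
					 * pre/post reset hooks take locks the
					 * caller may already hold */
	return -ENODEV;			/* tell callers a reenumeration follows */
}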
- * - * WARNING: no driver state saved/fixed - */ -static -int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt) -{ - int result; - struct i2400mu *i2400mu = - container_of(i2400m, struct i2400mu, i2400m); - struct device *dev = i2400m_dev(i2400m); - static const __le32 i2400m_WARM_BOOT_BARKER[4] = { - cpu_to_le32(I2400M_WARM_RESET_BARKER), - cpu_to_le32(I2400M_WARM_RESET_BARKER), - cpu_to_le32(I2400M_WARM_RESET_BARKER), - cpu_to_le32(I2400M_WARM_RESET_BARKER), - }; - static const __le32 i2400m_COLD_BOOT_BARKER[4] = { - cpu_to_le32(I2400M_COLD_RESET_BARKER), - cpu_to_le32(I2400M_COLD_RESET_BARKER), - cpu_to_le32(I2400M_COLD_RESET_BARKER), - cpu_to_le32(I2400M_COLD_RESET_BARKER), - }; - - d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); - if (rt == I2400M_RT_WARM) - result = __i2400mu_send_barker( - i2400mu, i2400m_WARM_BOOT_BARKER, - sizeof(i2400m_WARM_BOOT_BARKER), - i2400mu->endpoint_cfg.bulk_out); - else if (rt == I2400M_RT_COLD) - result = __i2400mu_send_barker( - i2400mu, i2400m_COLD_BOOT_BARKER, - sizeof(i2400m_COLD_BOOT_BARKER), - i2400mu->endpoint_cfg.reset_cold); - else if (rt == I2400M_RT_BUS) { - result = usb_reset_device(i2400mu->usb_dev); - switch (result) { - case 0: - case -EINVAL: /* device is gone */ - case -ENODEV: - case -ENOENT: - case -ESHUTDOWN: - result = 0; - break; /* We assume the device is disconnected */ - default: - dev_err(dev, "USB reset failed (%d), giving up!\n", - result); - } - } else { - result = -EINVAL; /* shut gcc up in certain arches */ - BUG(); - } - if (result < 0 - && result != -EINVAL /* device is gone */ - && rt != I2400M_RT_BUS) { - /* - * Things failed -- resort to lower level reset, that - * we queue in another context; the reason for this is - * that the pre and post reset functionality requires - * the i2400m->init_mutex; RT_WARM and RT_COLD can - * come from areas where i2400m->init_mutex is taken. - */ - dev_err(dev, "%s reset failed (%d); trying USB reset\n", - rt == I2400M_RT_WARM ? "warm" : "cold", result); - usb_queue_reset_device(i2400mu->usb_iface); - result = -ENODEV; - } - d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result); - return result; -} - -static void i2400mu_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *info) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - struct usb_device *udev = i2400mu->usb_dev; - - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->fw_version, i2400m->fw_name ? 
: "", - sizeof(info->fw_version)); - usb_make_path(udev, info->bus_info, sizeof(info->bus_info)); -} - -static const struct ethtool_ops i2400mu_ethtool_ops = { - .get_drvinfo = i2400mu_get_drvinfo, - .get_link = ethtool_op_get_link, -}; - -static -void i2400mu_netdev_setup(struct net_device *net_dev) -{ - struct i2400m *i2400m = net_dev_to_i2400m(net_dev); - struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m); - i2400mu_init(i2400mu); - i2400m_netdev_setup(net_dev); - net_dev->ethtool_ops = &i2400mu_ethtool_ops; -} - - -/* - * Debug levels control; see debug.h - */ -struct d_level D_LEVEL[] = { - D_SUBMODULE_DEFINE(usb), - D_SUBMODULE_DEFINE(fw), - D_SUBMODULE_DEFINE(notif), - D_SUBMODULE_DEFINE(rx), - D_SUBMODULE_DEFINE(tx), -}; -size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); - -static -void i2400mu_debugfs_add(struct i2400mu *i2400mu) -{ - struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry; - - dentry = debugfs_create_dir("i2400m-usb", dentry); - i2400mu->debugfs_dentry = dentry; - - d_level_register_debugfs("dl_", usb, dentry); - d_level_register_debugfs("dl_", fw, dentry); - d_level_register_debugfs("dl_", notif, dentry); - d_level_register_debugfs("dl_", rx, dentry); - d_level_register_debugfs("dl_", tx, dentry); - - /* Don't touch these if you don't know what you are doing */ - debugfs_create_u8("rx_size_auto_shrink", 0600, dentry, - &i2400mu->rx_size_auto_shrink); - - debugfs_create_size_t("rx_size", 0600, dentry, &i2400mu->rx_size); -} - - -static struct device_type i2400mu_type = { - .name = "wimax", -}; - -/* - * Probe a i2400m interface and register it - * - * @iface: USB interface to link to - * @id: USB class/subclass/protocol id - * @returns: 0 if ok, < 0 errno code on error. - * - * Alloc a net device, initialize the bus-specific details and then - * calls the bus-generic initialization routine. That will register - * the wimax and netdev devices, upload the firmware [using - * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the - * communication with the device and then will start to talk to it to - * finnish setting it up. - */ -static -int i2400mu_probe(struct usb_interface *iface, - const struct usb_device_id *id) -{ - int result; - struct net_device *net_dev; - struct device *dev = &iface->dev; - struct i2400m *i2400m; - struct i2400mu *i2400mu; - struct usb_device *usb_dev = interface_to_usbdev(iface); - - if (iface->cur_altsetting->desc.bNumEndpoints < 4) - return -ENODEV; - - if (usb_dev->speed != USB_SPEED_HIGH) - dev_err(dev, "device not connected as high speed\n"); - - /* Allocate instance [calls i2400m_netdev_setup() on it]. */ - result = -ENOMEM; - net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d", NET_NAME_UNKNOWN, - i2400mu_netdev_setup); - if (net_dev == NULL) { - dev_err(dev, "no memory for network device instance\n"); - goto error_alloc_netdev; - } - SET_NETDEV_DEV(net_dev, dev); - SET_NETDEV_DEVTYPE(net_dev, &i2400mu_type); - i2400m = net_dev_to_i2400m(net_dev); - i2400mu = container_of(i2400m, struct i2400mu, i2400m); - i2400m->wimax_dev.net_dev = net_dev; - i2400mu->usb_dev = usb_get_dev(usb_dev); - i2400mu->usb_iface = iface; - usb_set_intfdata(iface, i2400mu); - - i2400m->bus_tx_block_size = I2400MU_BLK_SIZE; - /* - * Room required in the Tx queue for USB message to accommodate - * a smallest payload while allocating header space is 16 bytes. - * Adding this room for the new tx message increases the - * possibilities of including any payload with size <= 16 bytes. 
- */ - i2400m->bus_tx_room_min = I2400MU_BLK_SIZE; - i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX; - i2400m->bus_setup = NULL; - i2400m->bus_dev_start = i2400mu_bus_dev_start; - i2400m->bus_dev_stop = i2400mu_bus_dev_stop; - i2400m->bus_release = NULL; - i2400m->bus_tx_kick = i2400mu_bus_tx_kick; - i2400m->bus_reset = i2400mu_bus_reset; - i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES; - i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send; - i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; - i2400m->bus_bm_mac_addr_impaired = 0; - - switch (id->idProduct) { - case USB_DEVICE_ID_I6050: - case USB_DEVICE_ID_I6050_2: - case USB_DEVICE_ID_I6150: - case USB_DEVICE_ID_I6150_2: - case USB_DEVICE_ID_I6150_3: - case USB_DEVICE_ID_I6250: - i2400mu->i6050 = 1; - break; - default: - break; - } - - if (i2400mu->i6050) { - i2400m->bus_fw_names = i2400mu_bus_fw_names_6050; - i2400mu->endpoint_cfg.bulk_out = 0; - i2400mu->endpoint_cfg.notification = 3; - i2400mu->endpoint_cfg.reset_cold = 2; - i2400mu->endpoint_cfg.bulk_in = 1; - } else { - i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50; - i2400mu->endpoint_cfg.bulk_out = 0; - i2400mu->endpoint_cfg.notification = 1; - i2400mu->endpoint_cfg.reset_cold = 2; - i2400mu->endpoint_cfg.bulk_in = 3; - } -#ifdef CONFIG_PM - iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ - device_init_wakeup(dev, 1); - pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000); - usb_enable_autosuspend(usb_dev); -#endif - - result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT); - if (result < 0) { - dev_err(dev, "cannot setup device: %d\n", result); - goto error_setup; - } - i2400mu_debugfs_add(i2400mu); - return 0; - -error_setup: - usb_set_intfdata(iface, NULL); - usb_put_dev(i2400mu->usb_dev); - free_netdev(net_dev); -error_alloc_netdev: - return result; -} - - -/* - * Disconnect an i2400m from the system. - * - * i2400m_stop() has been called before, so all the rx and tx contexts - * have been taken down already. Make sure the queue is stopped, - * unregister netdev and i2400m, free and kill. - */ -static -void i2400mu_disconnect(struct usb_interface *iface) -{ - struct i2400mu *i2400mu = usb_get_intfdata(iface); - struct i2400m *i2400m = &i2400mu->i2400m; - struct net_device *net_dev = i2400m->wimax_dev.net_dev; - struct device *dev = &iface->dev; - - d_fnstart(3, dev, "(iface %p i2400m %p)\n", iface, i2400m); - - debugfs_remove_recursive(i2400mu->debugfs_dentry); - i2400m_release(i2400m); - usb_set_intfdata(iface, NULL); - usb_put_dev(i2400mu->usb_dev); - free_netdev(net_dev); - d_fnend(3, dev, "(iface %p i2400m %p) = void\n", iface, i2400m); -} - - -/* - * Get the device ready for USB port or system standby and hibernation - * - * USB port and system standby are handled the same. - * - * When the system hibernates, the USB device is powered down and then - * up, so we don't really have to do much here, as it will be seen as - * a reconnect. Still, for simplicity we consider this case the same as - * suspend, so that the device has a chance to notify the base - * station (if connected). - * - * So at the end, the three cases require common handling. - * - * If at the time of this call the device's firmware is not loaded, - * nothing has to be done. Note we can be "loose" about not reading - * i2400m->updown under i2400m->init_mutex. If it happens to change - * immediately, other parts of the call flow will fail and effectively - * catch it.
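Stripped of the command plumbing, the suspend policy the next comment block describes is a two-test gate: do nothing when the firmware is down, and refuse only an autosuspend (PMSG_IS_AUTO) that arrives while the link is busy, since a system suspend must not fail. A sketch under those assumptions; suspend_gate, fw_is_up and link_is_busy stand in for the driver's updown/state checks:

#include <linux/pm.h>
#include <linux/types.h>
#include <linux/errno.h>

static int suspend_gate(pm_message_t pm_msg, bool fw_is_up, bool link_is_busy)
{
	if (!fw_is_up)
		return 0;		/* nothing to quiesce */
	if (PMSG_IS_AUTO(pm_msg) && link_is_busy)
		return -EBADF;		/* autosuspend retries when link idles */
	/* ... enter power save, then stop notification polling ... */
	return 0;			/* system suspend must not fail */
}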
- * - * If the firmware is loaded, we need to: - * - * - tell the device to go into host interface power save mode, wait - * for it to ack - * - * This is more involved than it looks; we need to execute a - * command, but this time, we don't want the code in usb-{tx,rx}.c - * to call the usb_autopm_get/put_interface() barriers as it'd - * deadlock, so we need to decrement i2400mu->do_autopm, that acts - * as a poor man's semaphore. Ugly, but it works. - * - * As well, the device might refuse going to sleep for whichever - * reason. In this case we just fail. For system suspend/hibernate, - * we *can't* fail. We check PMSG_IS_AUTO to see if the - * suspend call comes from the USB stack or from the system and act - * accordingly. - * - * - stop the notification endpoint polling - */ -static -int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg) -{ - int result = 0; - struct device *dev = &iface->dev; - struct i2400mu *i2400mu = usb_get_intfdata(iface); - unsigned is_autosuspend = 0; - struct i2400m *i2400m = &i2400mu->i2400m; - -#ifdef CONFIG_PM - if (PMSG_IS_AUTO(pm_msg)) - is_autosuspend = 1; -#endif - - d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); - rmb(); /* see i2400m->updown's documentation */ - if (i2400m->updown == 0) - goto no_firmware; - if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) { - /* ugh -- the device is connected and this suspend - * request is an autosuspend one (not a system standby - * / hibernate). - * - * The only way the device can go to standby is if the - * link with the base station is in IDLE mode; if that - * were the case, we'd be in status - * I2400M_SS_CONNECTED_IDLE. But we are not. - * - * If we *tell* it to go power save now, it'll reset - * as a precautionary measure, so if this is an - * autosuspend thing, say no and it'll come back - * later, when the link is IDLE - */ - result = -EBADF; - d_printf(1, dev, "fw up, link up, not-idle, autosuspend: " - "not entering powersave\n"); - goto error_not_now; - } - d_printf(1, dev, "fw up: entering powersave\n"); - atomic_dec(&i2400mu->do_autopm); - result = i2400m_cmd_enter_powersave(i2400m); - atomic_inc(&i2400mu->do_autopm); - if (result < 0 && !is_autosuspend) { - /* System suspend, can't fail */ - dev_err(dev, "failed to suspend, will reset on resume\n"); - result = 0; - } - if (result < 0) - goto error_enter_powersave; - i2400mu_notification_release(i2400mu); - d_printf(1, dev, "powersave requested\n"); -error_enter_powersave: -error_not_now: -no_firmware: - d_fnend(3, dev, "(iface %p pm_msg %u) = %d\n", - iface, pm_msg.event, result); - return result; -} - - -static -int i2400mu_resume(struct usb_interface *iface) -{ - int ret = 0; - struct device *dev = &iface->dev; - struct i2400mu *i2400mu = usb_get_intfdata(iface); - struct i2400m *i2400m = &i2400mu->i2400m; - - d_fnstart(3, dev, "(iface %p)\n", iface); - rmb(); /* see i2400m->updown's documentation */ - if (i2400m->updown == 0) { - d_printf(1, dev, "fw was down, no resume needed\n"); - goto out; - } - d_printf(1, dev, "fw was up, resuming\n"); - i2400mu_notification_setup(i2400mu); - /* USB has flow control, so we don't need to give it time to - * come back; otherwise, we'd use something like a get-state - * command...
*/ -out: - d_fnend(3, dev, "(iface %p) = %d\n", iface, ret); - return ret; -} - - -static -int i2400mu_reset_resume(struct usb_interface *iface) -{ - int result; - struct device *dev = &iface->dev; - struct i2400mu *i2400mu = usb_get_intfdata(iface); - struct i2400m *i2400m = &i2400mu->i2400m; - - d_fnstart(3, dev, "(iface %p)\n", iface); - result = i2400m_dev_reset_handle(i2400m, "device reset on resume"); - d_fnend(3, dev, "(iface %p) = %d\n", iface, result); - return result < 0 ? result : 0; -} - - -/* - * Another driver or user space is triggering a reset on the device - * which contains the interface passed as an argument. Cease IO and - * save any device state you need to restore. - * - * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if - * you are in atomic context. - */ -static -int i2400mu_pre_reset(struct usb_interface *iface) -{ - struct i2400mu *i2400mu = usb_get_intfdata(iface); - return i2400m_pre_reset(&i2400mu->i2400m); -} - - -/* - * The reset has completed. Restore any saved device state and begin - * using the device again. - * - * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if - * you are in atomic context. - */ -static -int i2400mu_post_reset(struct usb_interface *iface) -{ - struct i2400mu *i2400mu = usb_get_intfdata(iface); - return i2400m_post_reset(&i2400mu->i2400m); -} - - -static -struct usb_device_id i2400mu_id_table[] = { - { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) }, - { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) }, - { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150) }, - { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_2) }, - { USB_DEVICE(0x8087, USB_DEVICE_ID_I6150_3) }, - { USB_DEVICE(0x8086, USB_DEVICE_ID_I6250) }, - { USB_DEVICE(0x8086, 0x0181) }, - { USB_DEVICE(0x8086, 0x1403) }, - { USB_DEVICE(0x8086, 0x1405) }, - { USB_DEVICE(0x8086, 0x0180) }, - { USB_DEVICE(0x8086, 0x0182) }, - { USB_DEVICE(0x8086, 0x1406) }, - { USB_DEVICE(0x8086, 0x1403) }, - { }, -}; -MODULE_DEVICE_TABLE(usb, i2400mu_id_table); - - -static -struct usb_driver i2400mu_driver = { - .name = KBUILD_MODNAME, - .suspend = i2400mu_suspend, - .resume = i2400mu_resume, - .reset_resume = i2400mu_reset_resume, - .probe = i2400mu_probe, - .disconnect = i2400mu_disconnect, - .pre_reset = i2400mu_pre_reset, - .post_reset = i2400mu_post_reset, - .id_table = i2400mu_id_table, - .supports_autosuspend = 1, -}; - -static -int __init i2400mu_driver_init(void) -{ - d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params, - "i2400m_usb.debug"); - return usb_register(&i2400mu_driver); -} -module_init(i2400mu_driver_init); - - -static -void __exit i2400mu_driver_exit(void) -{ - usb_deregister(&i2400mu_driver); -} -module_exit(i2400mu_driver_exit); - -MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); -MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M " - "(5x50 & 6050)"); -MODULE_LICENSE("GPL"); -MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5); -MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5); diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index c9f65e96ccb0..a3ed49cd95c3 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -215,7 +215,7 @@ static const struct net_device_ops netdev_ops = { .ndo_open = wg_open, .ndo_stop = wg_stop, .ndo_start_xmit = wg_xmit, - .ndo_get_stats64 = ip_tunnel_get_stats64 + .ndo_get_stats64 = dev_get_tstats64 }; static void wg_destruct(struct net_device *dev) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 170a64e67709..7add2002ff4c 
100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -18,19 +18,6 @@ menuconfig WLAN if WLAN -config WIRELESS_WDS - bool "mac80211-based legacy WDS support" if EXPERT - help - This option enables the deprecated WDS support, the newer - mac80211-based 4-addr AP/client support supersedes it with - a much better feature set (HT, VHT, ...) - - We plan to remove this option and code, so if you find - that you have to enable it, please let us know on the - linux-wireless@vger.kernel.org mailing list, so we can - help you migrate to 4-addr AP/client (or, if it's really - necessary, give up on our plan of removing it). - source "drivers/net/wireless/admtek/Kconfig" source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/atmel/Kconfig" diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index 5cf2045fadef..c41e72508d3d 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -1796,6 +1796,7 @@ static int adm8211_probe(struct pci_dev *pdev, if (io_len < 256 || mem_len < 1024) { printk(KERN_ERR "%s (adm8211): Too short PCI resources\n", pci_name(pdev)); + err = -ENOMEM; goto err_disable_pdev; } @@ -1805,6 +1806,7 @@ static int adm8211_probe(struct pci_dev *pdev, if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) { printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n", pci_name(pdev), reg); + err = -EINVAL; goto err_disable_pdev; } @@ -1815,8 +1817,8 @@ static int adm8211_probe(struct pci_dev *pdev, return err; /* someone else grabbed it? don't disable it */ } - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { printk(KERN_ERR "%s (adm8211): No suitable DMA available\n", pci_name(pdev)); goto err_free_reg; diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index d73ad60b571c..eeb6ff6aa2e1 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -651,6 +651,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi", [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel", [ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate", + [ATH10K_FW_FEATURE_IRAM_RECOVERY] = "iram-recovery", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -1349,7 +1350,8 @@ out: static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const char *boardname, - const char *fallback_boardname, + const char *fallback_boardname1, + const char *fallback_boardname2, const char *filename) { size_t len, magic_len; @@ -1398,8 +1400,11 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, ret = ath10k_core_search_bd(ar, boardname, data, len); /* if we didn't find it and have a fallback name, try that */ - if (ret == -ENOENT && fallback_boardname) - ret = ath10k_core_search_bd(ar, fallback_boardname, data, len); + if (ret == -ENOENT && fallback_boardname1) + ret = ath10k_core_search_bd(ar, fallback_boardname1, data, len); + + if (ret == -ENOENT && fallback_boardname2) + ret = ath10k_core_search_bd(ar, fallback_boardname2, data, len); if (ret == -ENOENT) { ath10k_err(ar, @@ -1419,7 +1424,8 @@ err: } static int ath10k_core_create_board_name(struct ath10k *ar, char *name, - size_t name_len, bool with_variant) + size_t name_len, bool with_variant, + bool with_chip_id) { /* strlen(',variant=') + 
strlen(ar->id.bdf_ext) */ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; @@ -1438,7 +1444,7 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name, } if (ar->id.qmi_ids_valid) { - if (with_variant && ar->id.bdf_ext[0] != '\0') + if (with_chip_id) scnprintf(name, name_len, "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s", ath10k_bus_str(ar->hif.bus), @@ -1482,21 +1488,36 @@ static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name, int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) { - char boardname[100], fallback_boardname[100]; + char boardname[100], fallback_boardname1[100], fallback_boardname2[100]; int ret; if (bd_ie_type == ATH10K_BD_IE_BOARD) { + /* With variant and chip id */ ret = ath10k_core_create_board_name(ar, boardname, - sizeof(boardname), true); + sizeof(boardname), true, + true); if (ret) { ath10k_err(ar, "failed to create board name: %d", ret); return ret; } - ret = ath10k_core_create_board_name(ar, fallback_boardname, - sizeof(boardname), false); + /* Without variant and only chip-id */ + ret = ath10k_core_create_board_name(ar, fallback_boardname1, + sizeof(boardname), false, + true); + if (ret) { + ath10k_err(ar, "failed to create 1st fallback board name: %d", + ret); + return ret; + } + + /* Without variant and without chip-id */ + ret = ath10k_core_create_board_name(ar, fallback_boardname2, + sizeof(boardname), false, + false); if (ret) { - ath10k_err(ar, "failed to create fallback board name: %d", ret); + ath10k_err(ar, "failed to create 2nd fallback board name: %d", + ret); return ret; } } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) { @@ -1510,7 +1531,8 @@ int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type) ar->bd_api = 2; ret = ath10k_core_fetch_board_data_api_n(ar, boardname, - fallback_boardname, + fallback_boardname1, + fallback_boardname2, ATH10K_BOARD_API2_FILE); if (!ret) goto success; @@ -2272,6 +2294,17 @@ static int ath10k_init_hw_params(struct ath10k *ar) return 0; } +void ath10k_core_start_recovery(struct ath10k *ar) +{ + if (test_and_set_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags)) { + ath10k_warn(ar, "already restarting\n"); + return; + } + + queue_work(ar->workqueue, &ar->restart_work); +} +EXPORT_SYMBOL(ath10k_core_start_recovery); + static void ath10k_core_restart(struct work_struct *work) { struct ath10k *ar = container_of(work, struct ath10k, restart_work); @@ -2604,6 +2637,78 @@ static int ath10k_core_compat_services(struct ath10k *ar) return 0; } +#define TGT_IRAM_READ_PER_ITR (8 * 1024) + +static int ath10k_core_copy_target_iram(struct ath10k *ar) +{ + const struct ath10k_hw_mem_layout *hw_mem; + const struct ath10k_mem_region *tmp, *mem_region = NULL; + dma_addr_t paddr; + void *vaddr = NULL; + u8 num_read_itr; + int i, ret; + u32 len, remaining_len; + + hw_mem = ath10k_coredump_get_mem_layout(ar); + if (!hw_mem) + return -ENOMEM; + + for (i = 0; i < hw_mem->region_table.size; i++) { + tmp = &hw_mem->region_table.regions[i]; + if (tmp->type == ATH10K_MEM_REGION_TYPE_REG) { + mem_region = tmp; + break; + } + } + + if (!mem_region) + return -ENOMEM; + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + if (ar->wmi.mem_chunks[i].req_id == + WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID) { + vaddr = ar->wmi.mem_chunks[i].vaddr; + len = ar->wmi.mem_chunks[i].len; + break; + } + } + + if (!vaddr || !len) { + ath10k_warn(ar, "No allocated memory for IRAM back up"); + return -ENOMEM; + } + + len = (len < mem_region->len) ? 
len : mem_region->len; + paddr = mem_region->start; + num_read_itr = len / TGT_IRAM_READ_PER_ITR; + remaining_len = len % TGT_IRAM_READ_PER_ITR; + for (i = 0; i < num_read_itr; i++) { + ret = ath10k_hif_diag_read(ar, paddr, vaddr, + TGT_IRAM_READ_PER_ITR); + if (ret) { + ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", + ret); + return ret; + } + + paddr += TGT_IRAM_READ_PER_ITR; + vaddr += TGT_IRAM_READ_PER_ITR; + } + + if (remaining_len) { + ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len); + if (ret) { + ath10k_warn(ar, "failed to copy firmware IRAM contents: %d", + ret); + return ret; + } + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "target IRAM back up completed\n"); + + return 0; +} + int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, const struct ath10k_fw_components *fw) { @@ -2636,7 +2741,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, if (status) goto err; - /* Some of of qca988x solutions are having global reset issue + /* Some of qca988x solutions are having global reset issue * during target initialization. Bypassing PLL setting before * downloading firmware and letting the SoC run on REF_CLK is * fixing the problem. Corresponding firmware change is also @@ -2765,6 +2870,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); + if (test_bit(ATH10K_FW_FEATURE_IRAM_RECOVERY, + ar->running_fw->fw_file.fw_features)) { + status = ath10k_core_copy_target_iram(ar); + if (status) { + ath10k_warn(ar, "failed to copy target iram contents: %d", + status); + goto err_hif_stop; + } + } + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) && mode == ATH10K_FIRMWARE_MODE_NORMAL) { val = 0; diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index b50ab9e229dc..51f7e960e297 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -84,6 +84,11 @@ #define ATH10K_MAX_RETRY_COUNT 30 +#define ATH10K_ITER_NORMAL_FLAGS (IEEE80211_IFACE_ITER_NORMAL | \ + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) +#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\ + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) + struct ath10k; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -829,6 +834,9 @@ enum ath10k_fw_features { /* Firmware allows setting peer fixed rate */ ATH10K_FW_FEATURE_PEER_FIXED_RATE = 21, + /* Firmware support IRAM recovery */ + ATH10K_FW_FEATURE_IRAM_RECOVERY = 22, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -857,6 +865,9 @@ enum ath10k_dev_flags { /* Per Station statistics service */ ATH10K_FLAG_PEER_STATS, + + /* Indicates that ath10k device is during recovery process and not complete */ + ATH10K_FLAG_RESTARTING, }; enum ath10k_cal_mode { @@ -1312,6 +1323,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, const struct ath10k_fw_components *fw_components); int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt); void ath10k_core_stop(struct ath10k *ar); +void ath10k_core_start_recovery(struct ath10k *ar); int ath10k_core_register(struct ath10k *ar, const struct ath10k_bus_params *bus_params); void ath10k_core_unregister(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index e8250a665433..fd052f6ed019 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ 
-583,7 +583,7 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, ret = ath10k_debug_fw_assert(ar); } else if (!strcmp(buf, "hw-restart")) { ath10k_info(ar, "user requested hw restart\n"); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); ret = 0; } else { ret = -EINVAL; @@ -1764,7 +1764,7 @@ static ssize_t ath10k_write_simulate_radar(struct file *file, struct ath10k *ar = file->private_data; struct ath10k_vif *arvif; - /* Just check for for the first vif alone, as all the vifs will be + /* Just check for the first vif alone, as all the vifs will be * sharing the same channel and if the channel is disabled, all the * vifs will share the same 'is_started' state. */ @@ -2005,7 +2005,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, } } else { ath10k_info(ar, "restarting firmware due to btcoex change"); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); } if (val) @@ -2136,7 +2136,7 @@ static ssize_t ath10k_write_peer_stats(struct file *file, ath10k_info(ar, "restarting firmware due to Peer stats change"); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); ret = count; exit: diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 5c1af2021883..9c4e6cf2137a 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -3878,7 +3878,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) return ath10k_htt_rx_proc_rx_frag_ind(htt, &resp->rx_frag_ind, skb); - break; } case HTT_T2H_MSG_TYPE_TEST: break; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 2e3eb5bbe49c..7d98250380ec 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -2066,7 +2066,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) { ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_mac_handle_beacon_iter, skb); } @@ -2099,7 +2099,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) { ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_mac_handle_beacon_miss_iter, &vdev_id); } @@ -3433,7 +3433,7 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) return; ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_RESUME_ALL, + ATH10K_ITER_RESUME_FLAGS, ath10k_mac_tx_unlock_iter, ar); @@ -3522,7 +3522,7 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, spin_lock_bh(&ar->htt.tx_lock); ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_RESUME_ALL, + ATH10K_ITER_RESUME_FLAGS, ath10k_mac_handle_tx_pause_iter, &arg); spin_unlock_bh(&ar->htt.tx_lock); @@ -5457,7 +5457,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, /* Some firmware revisions don't wait for beacon tx completion before * sending another SWBA event. This could lead to hardware using old * (freed) beacon data in some cases, e.g. tx credit starvation - * combined with missed TBTT. This is very very rare. + * combined with missed TBTT. This is very rare. * * On non-IOMMU-enabled hosts this could be a possible security issue * because hw could beacon some random data on the air. 
On @@ -7932,6 +7932,7 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw, ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; ieee80211_wake_queues(ar->hw); + clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags); } mutex_unlock(&ar->conf_mutex); @@ -8696,7 +8697,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { ieee80211_iterate_active_interfaces_atomic( hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_mac_change_chanctx_cnt_iter, &arg); if (arg.n_vifs == 0) @@ -8709,7 +8710,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, ieee80211_iterate_active_interfaces_atomic( hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_mac_change_chanctx_fill_iter, &arg); ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); @@ -9169,10 +9170,11 @@ static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw, goto exit; } + ret = 0; + if (sta) goto exit; - ret = 0; arvif->tids_rst = 0; data.curr_vif = vif; data.ar = ar; @@ -9593,14 +9595,12 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac, struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) { struct ath10k_vif_iter arvif_iter; - u32 flags; memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); arvif_iter.vdev_id = vdev_id; - flags = IEEE80211_IFACE_ITER_RESUME_ALL; ieee80211_iterate_active_interfaces_atomic(ar->hw, - flags, + ATH10K_ITER_RESUME_FLAGS, ath10k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c index 29c737b2f432..517b30f56b72 100644 --- a/drivers/net/wireless/ath/ath10k/p2p.c +++ b/drivers/net/wireless/ath/ath10k/p2p.c @@ -139,7 +139,7 @@ void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id, }; ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_p2p_noa_update_vdev_iter, &arg); } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 36426efdb2ea..2328df09875c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1774,7 +1774,7 @@ static void ath10k_pci_fw_dump_work(struct work_struct *work) mutex_unlock(&ar->dump_mutex); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); } static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) @@ -3236,7 +3236,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar) if (ret == 0) return 0; - /* fall-through */ + /* MHI failed, try legacy irq next */ } /* Try legacy irq diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index ae6b1f402adf..07e478f9a808 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -917,7 +917,7 @@ static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl, ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL); } -static struct qmi_msg_handler qmi_msg_handler[] = { +static const struct qmi_msg_handler qmi_msg_handler[] = { { .type = QMI_INDICATION, .msg_id = QMI_WLFW_FW_READY_IND_V01, @@ -981,7 +981,7 @@ static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl, NULL); } -static struct qmi_ops ath10k_qmi_ops = { +static const struct qmi_ops ath10k_qmi_ops = { .new_server = ath10k_qmi_new_server, .del_server = ath10k_qmi_del_server, }; diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h 
b/drivers/net/wireless/ath/ath10k/rx_desc.h index dec1582005b9..f2b6bf8f0d60 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -68,7 +68,7 @@ struct rx_attention { * first_msdu is set. * * peer_idx_invalid - * Indicates no matching entries within the the max search + * Indicates no matching entries within the max search * count. Only set when first_msdu is set. * * peer_idx_timeout diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 81ddaafb6721..c415090d1f37 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -561,7 +561,7 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar, ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH); ret = -ENOMEM; - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); ath10k_warn(ar, "exceeds length, start recovery\n"); goto err; @@ -960,7 +960,7 @@ static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar, ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS, irq_proc_reg, sizeof(*irq_proc_reg)); if (ret) { - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); ath10k_warn(ar, "read int status fail, start recovery\n"); goto out; } @@ -1248,7 +1248,7 @@ static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar, * Wait for first 4 bytes to be in FIFO * If CONSERVATIVE_BMI_READ is enabled, also wait for * a BMI command credit, which indicates that the ENTIRE - * response is available in the the FIFO + * response is available in the FIFO * * CASE 3: length > 128 * Wait for the first 4 bytes to be in FIFO @@ -1962,9 +1962,15 @@ static void ath10k_sdio_hif_stop(struct ath10k *ar) { struct ath10k_sdio_bus_request *req, *tmp_req; struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + struct sk_buff *skb; ath10k_sdio_irq_disable(ar); + cancel_work_sync(&ar_sdio->async_work_rx); + + while ((skb = skb_dequeue(&ar_sdio->rx_head))) + dev_kfree_skb_any(skb); + cancel_work_sync(&ar_sdio->wr_async_work); spin_lock_bh(&ar_sdio->wr_async_lock); @@ -2231,7 +2237,7 @@ static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param); - return param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW; + return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW); } static void ath10k_sdio_dump_registers(struct ath10k *ar, @@ -2307,8 +2313,8 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar, } count = 0; - - for (i = 0; cur_section; i++) { + i = 0; + for (; cur_section; cur_section = next_section) { section_size = cur_section->end - cur_section->start; if (section_size <= 0) { @@ -2318,7 +2324,7 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar, break; } - if ((i + 1) == mem_region->section_table.size) { + if (++i == mem_region->section_table.size) { /* last section */ next_section = NULL; skip_size = 0; @@ -2361,12 +2367,6 @@ static int ath10k_sdio_dump_memory_section(struct ath10k *ar, } count += skip_size; - - if (!next_section) - /* this was the last section */ - break; - - cur_section = next_section; } return count; @@ -2501,7 +2501,7 @@ void ath10k_sdio_fw_crashed_dump(struct ath10k *ar) ath10k_sdio_enable_intrs(ar); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); } static int ath10k_sdio_probe(struct sdio_func *func, diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index fd41f25456dc..bf9a8cb713dc 100644 --- 
a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1305,7 +1305,7 @@ int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) switch (type) { case ATH10K_QMI_EVENT_FW_READY_IND: if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) { - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); break; } diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index 05a620ff6fe2..19b9c27e30e2 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -997,6 +997,8 @@ static int ath10k_usb_probe(struct usb_interface *interface, ar_usb = ath10k_usb_priv(ar); ret = ath10k_usb_create(ar, interface); + if (ret) + goto err; ar_usb->ar = ar; ar->dev_id = product_id; @@ -1009,7 +1011,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_warn(ar, "failed to register driver core: %d\n", ret); - goto err; + goto err_usb_destroy; } /* TODO: remove this once USB support is fully implemented */ @@ -1017,6 +1019,9 @@ static int ath10k_usb_probe(struct usb_interface *interface, return 0; +err_usb_destroy: + ath10k_usb_destroy(ar); + err: ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 932266d1111b..7b5834157fe5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1401,13 +1401,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, switch (tag) { case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: + arg->service_map_ext_valid = true; arg->service_map_ext_len = *(__le32 *)ptr; arg->service_map_ext = ptr + sizeof(__le32); return 0; default: break; } - return -EPROTO; + + return 0; } static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 1fa7107a5051..1f33947e2088 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1894,7 +1894,7 @@ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac, static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar) { ieee80211_iterate_active_interfaces_atomic(ar->hw, - IEEE80211_IFACE_ITER_NORMAL, + ATH10K_ITER_NORMAL_FLAGS, ath10k_wmi_tx_beacons_iter, NULL); } @@ -1937,7 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) if (ret == -EAGAIN) { ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n", cmd_id); - queue_work(ar->workqueue, &ar->restart_work); + ath10k_core_start_recovery(ar); } return ret; @@ -5751,8 +5751,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) ret); } - ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, - __le32_to_cpu(arg.service_map_ext_len)); + /* + * Initialization of "arg.service_map_ext_valid" to ZERO is necessary + * for the below logic to work. 
+ */ + if (arg.service_map_ext_valid) + ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, + __le32_to_cpu(arg.service_map_ext_len)); } static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 4898e19b0af6..d870f7067cb7 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3060,6 +3060,8 @@ struct host_memory_chunk { __le32 size; } __packed; +#define WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID 8 + struct wmi_host_mem_chunks { __le32 count; /* some fw revisions require at least 1 chunk regardless of count */ @@ -3832,7 +3834,7 @@ enum wmi_pdev_param { WMI_PDEV_PARAM_BEACON_TX_MODE, /* * Resource manager off chan mode . - * 0: turn off off chan mode. 1: turn on offchan mode + * 0: turn off offchan mode. 1: turn on offchan mode */ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE, /* @@ -3936,7 +3938,7 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_BEACON_TX_MODE, /* * Resource manager off chan mode . - * 0: turn off off chan mode. 1: turn on offchan mode + * 0: turn off offchan mode. 1: turn on offchan mode */ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE, /* @@ -6917,6 +6919,7 @@ struct wmi_svc_rdy_ev_arg { }; struct wmi_svc_avail_ev_arg { + bool service_map_ext_valid; __le32 service_map_ext_len; const __le32 *service_map_ext; }; diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index c41d87bd025a..c1fce4159f1f 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -16,7 +16,8 @@ ath11k-y += core.o \ ce.o \ peer.o \ dbring.o \ - hw.o + hw.o \ + wow.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 430723c64adc..d4ef45cd0685 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -340,6 +340,31 @@ static void ath11k_ahb_power_down(struct ath11k_base *ab) rproc_shutdown(ab_ahb->tgt_rproc); } +static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab) +{ + int timeout; + + if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done || + ab->hw_params.cold_boot_calib == 0) + return 0; + + ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n"); + timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, + (ab->qmi.cal_done == 1), + ATH11K_COLD_BOOT_FW_RESET_DELAY); + if (timeout <= 0) { + ath11k_cold_boot_cal = 0; + ath11k_warn(ab, "Coldboot Calibration failed timed out\n"); + } + + /* reset the firmware */ + ath11k_ahb_power_down(ab); + ath11k_ahb_power_up(ab); + + ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n"); + return 0; +} + static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) { struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; @@ -700,6 +725,8 @@ static int ath11k_ahb_probe(struct platform_device *pdev) goto err_ce_free; } + ath11k_ahb_fwreset_from_cold_boot(ab); + return 0; err_ce_free: @@ -720,6 +747,13 @@ static int ath11k_ahb_remove(struct platform_device *pdev) struct ath11k_base *ab = platform_get_drvdata(pdev); unsigned long left; + if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath11k_ahb_power_down(ab); + ath11k_debugfs_soc_destroy(ab); + ath11k_qmi_deinit_service(ab); + goto qmi_fail; + } + reinit_completion(&ab->driver_recovery); if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) { @@ 
-733,8 +767,8 @@ static int ath11k_ahb_remove(struct platform_device *pdev) cancel_work_sync(&ab->restart_work); ath11k_core_deinit(ab); +qmi_fail: ath11k_ahb_free_irq(ab); - ath11k_hal_srng_deinit(ab); ath11k_ce_free_pipes(ab); ath11k_core_free(ab); diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c index 9d730f8ac816..987c65010272 100644 --- a/drivers/net/wireless/ath/ath11k/ce.c +++ b/drivers/net/wireless/ath/ath11k/ce.c @@ -195,7 +195,7 @@ static bool ath11k_ce_need_shadow_fix(int ce_id) return false; } -static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab) +void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab) { int i; diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 269b599ac0b0..d6eeef919349 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -190,4 +190,6 @@ int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, int ath11k_ce_attr_attach(struct ath11k_base *ab); void ath11k_ce_get_shadow_config(struct ath11k_base *ab, u32 **shadow_cfg, u32 *shadow_cfg_len); +void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab); + #endif diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index ebd6886a8c18..b97c38b9a270 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -7,11 +7,13 @@ #include <linux/slab.h> #include <linux/remoteproc.h> #include <linux/firmware.h> +#include <linux/of.h> #include "core.h" #include "dp_tx.h" #include "dp_rx.h" #include "debug.h" #include "hif.h" +#include "wow.h" unsigned int ath11k_debug_mask; EXPORT_SYMBOL(ath11k_debug_mask); @@ -50,7 +52,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074, .svc_to_ce_map_len = 21, .single_pdev_only = false, - .needs_band_to_mac = true, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, .rx_mac_buf_ring = false, @@ -65,6 +66,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = true, .supports_shadow_regs = false, .idle_ps = false, + .cold_boot_calib = true, + .supports_suspend = false, }, { .hw_rev = ATH11K_HW_IPQ6018_HW10, @@ -87,7 +90,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018, .svc_to_ce_map_len = 19, .single_pdev_only = false, - .needs_band_to_mac = true, .rxdma1_enable = true, .num_rxmda_per_pdev = 1, .rx_mac_buf_ring = false, @@ -102,6 +104,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = true, .supports_shadow_regs = false, .idle_ps = false, + .cold_boot_calib = true, + .supports_suspend = false, }, { .name = "qca6390 hw2.0", @@ -124,7 +128,6 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390, .svc_to_ce_map_len = 14, .single_pdev_only = true, - .needs_band_to_mac = false, .rxdma1_enable = false, .num_rxmda_per_pdev = 2, .rx_mac_buf_ring = true, @@ -138,17 +141,130 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_monitor = false, .supports_shadow_regs = true, .idle_ps = true, + .cold_boot_calib = false, + .supports_suspend = true, }, }; +int ath11k_core_suspend(struct ath11k_base *ab) +{ + int ret; + + if (!ab->hw_params.supports_suspend) + return -EOPNOTSUPP; + + /* TODO: there can frames in queues so for now add delay as a hack. 
+ * Need to implement proper handling and remove this delay. + */ + msleep(500); + + ret = ath11k_dp_rx_pktlog_stop(ab, true); + if (ret) { + ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n", + ret); + return ret; + } + + ret = ath11k_wow_enable(ab); + if (ret) { + ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); + return ret; + } + + ret = ath11k_dp_rx_pktlog_stop(ab, false); + if (ret) { + ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n", + ret); + return ret; + } + + ath11k_ce_stop_shadow_timers(ab); + ath11k_dp_stop_shadow_timers(ab); + + ath11k_hif_irq_disable(ab); + ath11k_hif_ce_irq_disable(ab); + + ret = ath11k_hif_suspend(ab); + if (ret) { + ath11k_warn(ab, "failed to suspend hif: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ath11k_core_suspend); + +int ath11k_core_resume(struct ath11k_base *ab) +{ + int ret; + + if (!ab->hw_params.supports_suspend) + return -EOPNOTSUPP; + + ret = ath11k_hif_resume(ab); + if (ret) { + ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); + return ret; + } + + ath11k_hif_ce_irq_enable(ab); + ath11k_hif_irq_enable(ab); + + ret = ath11k_dp_rx_pktlog_start(ab); + if (ret) { + ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n", + ret); + return ret; + } + + ret = ath11k_wow_wakeup(ab); + if (ret) { + ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(ath11k_core_resume); + +int ath11k_core_check_dt(struct ath11k_base *ab) +{ + size_t max_len = sizeof(ab->qmi.target.bdf_ext); + const char *variant = NULL; + struct device_node *node; + + node = ab->dev->of_node; + if (!node) + return -ENOENT; + + of_property_read_string(node, "qcom,ath11k-calibration-variant", + &variant); + if (!variant) + return -ENODATA; + + if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0) + ath11k_dbg(ab, ATH11K_DBG_BOOT, + "bdf variant string is longer than the buffer can accommodate (variant: %s)\n", + variant); + + return 0; +} + static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, size_t name_len) { + /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ + char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; + + if (ab->qmi.target.bdf_ext[0] != '\0') + scnprintf(variant, sizeof(variant), ",variant=%s", + ab->qmi.target.bdf_ext); + scnprintf(name, name_len, - "bus=%s,qmi-chip-id=%d,qmi-board-id=%d", + "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", ath11k_bus_str(ab->hif.bus), ab->qmi.target.chip_id, - ab->qmi.target.board_id); + ab->qmi.target.board_id, variant); ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot using board name '%s'\n", name); @@ -612,6 +728,15 @@ static int ath11k_core_start(struct ath11k_base *ab, goto err_reo_cleanup; } + /* put hardware to DBS mode */ + if (ab->hw_params.single_pdev_only) { + ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); + if (ret) { + ath11k_err(ab, "failed to send dbs mode: %d\n", ret); + goto err_hif_stop; + } + } + ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab); if (ret) { ath11k_err(ab, "failed to send htt version request message: %d\n", @@ -774,8 +899,10 @@ static void ath11k_core_restart(struct work_struct *work) complete(&ar->scan.started); complete(&ar->scan.completed); complete(&ar->peer_assoc_done); + complete(&ar->peer_delete_done); complete(&ar->install_key_done); complete(&ar->vdev_setup_done); + complete(&ar->vdev_delete_done); complete(&ar->bss_survey_done); complete(&ar->thermal.wmi_sync); @@ -923,8 +1050,12 @@ struct
ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); init_waitqueue_head(&ab->wmi_ab.tx_credits_wq); + init_waitqueue_head(&ab->qmi.cold_boot_waitq); INIT_WORK(&ab->restart_work, ath11k_core_restart); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); + init_completion(&ab->htc_suspend); + init_completion(&ab->wow.wakeup_completed); + ab->dev = dev; ab->bus_params = *bus_params; ab->hif.bus = bus; diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 18b97420f0d8..799bf3de1117 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -75,12 +75,14 @@ static inline enum wme_ac ath11k_tid_to_ac(u32 tid) enum ath11k_skb_flags { ATH11K_SKB_HW_80211_ENCAP = BIT(0), + ATH11K_SKB_CIPHER_SET = BIT(1), }; struct ath11k_skb_cb { dma_addr_t paddr; u8 eid; u8 flags; + u32 cipher; struct ath11k *ar; struct ieee80211_vif *vif; } __packed; @@ -111,8 +113,13 @@ enum ath11k_firmware_mode { /* factory tests etc */ ATH11K_FIRMWARE_MODE_FTM, + + /* Cold boot calibration */ + ATH11K_FIRMWARE_MODE_COLD_BOOT = 7, }; +extern bool ath11k_cold_boot_cal; + #define ATH11K_IRQ_NUM_MAX 52 #define ATH11K_EXT_IRQ_NUM_MAX 16 @@ -178,6 +185,8 @@ enum ath11k_dev_flags { ATH11K_FLAG_RECOVERY, ATH11K_FLAG_UNREGISTERING, ATH11K_FLAG_REGISTERED, + ATH11K_FLAG_QMI_FAIL, + ATH11K_FLAG_HTC_SUSPEND_COMPLETE, }; enum ath11k_monitor_flags { @@ -425,11 +434,7 @@ struct ath11k_per_peer_tx_stats { }; #define ATH11K_FLUSH_TIMEOUT (5 * HZ) - -struct ath11k_vdev_stop_status { - bool stop_in_progress; - u32 vdev_id; -}; +#define ATH11K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ) struct ath11k { struct ath11k_base *ab; @@ -461,6 +466,7 @@ struct ath11k { struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES]; } mac; + unsigned long dev_flags; unsigned int filter_flags; unsigned long monitor_flags; @@ -500,13 +506,14 @@ struct ath11k { u8 lmac_id; struct completion peer_assoc_done; + struct completion peer_delete_done; int install_key_status; struct completion install_key_done; int last_wmi_vdev_start_status; - struct ath11k_vdev_stop_status vdev_stop_status; struct completion vdev_setup_done; + struct completion vdev_delete_done; int num_peers; int max_num_peers; @@ -666,6 +673,10 @@ struct ath11k_base { const struct ath11k_hif_ops *ops; } hif; + struct { + struct completion wakeup_completed; + } wow; + struct ath11k_ce ce; struct timer_list rx_replenish_retry; struct ath11k_hal hal; @@ -723,13 +734,13 @@ struct ath11k_base { } stats; u32 pktlog_defs_checksum; - /* Round robbin based TCL ring selector */ - atomic_t tcl_ring_selector; - struct ath11k_dbring_cap *db_caps; u32 num_db_cap; struct timer_list mon_reap_timer; + + struct completion htc_suspend; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; @@ -883,8 +894,11 @@ void ath11k_core_free(struct ath11k_base *ath11k); int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, struct ath11k_board_data *bd); void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); +int ath11k_core_check_dt(struct ath11k_base *ath11k); void ath11k_core_halt(struct ath11k *ar); +int ath11k_core_resume(struct ath11k_base *ab); +int ath11k_core_suspend(struct ath11k_base *ab); const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const char *filename); @@ -907,6 +921,8 @@ static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state) 
static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb) { + BUILD_BUG_ON(sizeof(struct ath11k_skb_cb) > + IEEE80211_TX_INFO_DRIVER_DATA_SIZE); return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; } diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 1b914e67d314..554feaf1ed5c 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -867,6 +867,7 @@ void ath11k_debugfs_soc_destroy(struct ath11k_base *ab) debugfs_remove_recursive(ab->debugfs_ath11k); ab->debugfs_ath11k = NULL; } +EXPORT_SYMBOL(ath11k_debugfs_soc_destroy); void ath11k_debugfs_fw_stats_init(struct ath11k *ar) { diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 59dd185a0cfc..04f6c4e0658b 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -304,7 +304,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, return 0; } -static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab) +void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab) { int i; @@ -381,7 +381,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) HAL_WBM2SW_RELEASE, i, 0, DP_TX_COMP_RING_SIZE); if (ret) { - ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d\n", + ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n", i, ret); goto err; } diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index ee8db812589b..ee768ccce46e 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -40,6 +40,7 @@ struct dp_rx_tid { #define DP_REO_DESC_FREE_THRESHOLD 64 #define DP_REO_DESC_FREE_TIMEOUT_MS 1000 +#define DP_MON_PURGE_TIMEOUT_MS 100 #define DP_MON_SERVICE_BUDGET 128 struct dp_reo_cache_flush_elem { @@ -423,7 +424,7 @@ enum htt_srng_ring_id { * Used only by Consumer ring to generate ring_sw_int_p. * Ring entries low threshold water mark, that is used * in combination with the interrupt timer as well as - * the the clearing of the level interrupt. + * the clearing of the level interrupt. * b'16:18 - prefetch_timer_cfg: * Used only by Consumer ring to set timer mode to * support Application prefetch handling. 
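
The BUILD_BUG_ON() added to ATH11K_SKB_CB() just above turns an overflow of mac80211's per-skb driver_data scratch area into a build failure rather than silent corruption, which matters now that the cb also carries the stashed cipher. In plain C11 the same guard is a _Static_assert; the 40-byte limit mirrors IEEE80211_TX_INFO_DRIVER_DATA_SIZE and the struct layout is a simplified stand-in:

#include <stdint.h>

#define DRIVER_DATA_SIZE 40 /* mirrors IEEE80211_TX_INFO_DRIVER_DATA_SIZE */

struct skb_cb {
	uint64_t paddr;  /* dma_addr_t stand-in */
	uint8_t eid;
	uint8_t flags;
	uint32_t cipher; /* stashed at tx time so completion paths never
			  * touch info->control.hw_key again */
	void *ar;
	void *vif;
};

/* Fails at compile time, not at runtime, if the struct outgrows the area. */
_Static_assert(sizeof(struct skb_cb) <= DRIVER_DATA_SIZE,
	       "skb_cb must fit in the driver_data scratch area");

int main(void)
{
	return 0;
}
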
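Further up, ath11k_core_create_board_name() builds its optional ",variant=" suffix in a scratch buffer sized as strlen(",variant=") plus the maximum bdf_ext length, so boards without a DT calibration variant keep their old lookup key. A self-contained sketch of that logic; BDF_EXT_STR_LENGTH and the sample variant value are illustrative only:

#include <stdio.h>

#define BDF_EXT_STR_LENGTH 32 /* illustrative stand-in for ATH11K_QMI_BDF_EXT_STR_LENGTH */

static void create_board_name(char *name, size_t name_len, const char *bus,
			      int chip_id, int board_id, const char *bdf_ext)
{
	/* strlen(",variant=") == 9, matching the driver's size comment */
	char variant[9 + BDF_EXT_STR_LENGTH] = { 0 };

	if (bdf_ext[0] != '\0')
		snprintf(variant, sizeof(variant), ",variant=%s", bdf_ext);

	/* the %s expands to "" when no DT variant was found */
	snprintf(name, name_len, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
		 bus, chip_id, board_id, variant);
}

int main(void)
{
	char name[128];

	create_board_name(name, sizeof(name), "ahb", 2, 255, "");
	printf("%s\n", name); /* bus=ahb,qmi-chip-id=2,qmi-board-id=255 */

	create_board_name(name, sizeof(name), "ahb", 2, 255, "some-vendor");
	printf("%s\n", name); /* ...,qmi-board-id=255,variant=some-vendor */

	return 0;
}
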
@@ -1640,5 +1641,6 @@ void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab, void ath11k_dp_shadow_init_timer(struct ath11k_base *ab, struct ath11k_hp_update_timer *update_timer, u32 interval, u32 ring_id); +void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab); #endif diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 01625327eef7..205c0f1a40e9 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -274,6 +274,28 @@ static void ath11k_dp_service_mon_ring(struct timer_list *t) msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); } +static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) +{ + int i, reaped = 0; + unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); + + do { + for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) + reaped += ath11k_dp_rx_process_mon_rings(ab, i, + NULL, + DP_MON_SERVICE_BUDGET); + + /* nothing more to reap */ + if (reaped < DP_MON_SERVICE_BUDGET) + return 0; + + } while (time_before(jiffies, timeout)); + + ath11k_warn(ab, "dp mon ring purge timeout"); + + return -ETIMEDOUT; +} + /* Returns number of Rx buffers replenished */ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, struct dp_rxdma_ring *rx_ring, @@ -377,7 +399,7 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, spin_lock_bh(&rx_ring->idr_lock); idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { idr_remove(&rx_ring->bufs_idr, buf_id); - /* TODO: Understand where internal driver does this dma_unmap of + /* TODO: Understand where internal driver does this dma_unmap * of rxdma_buffer. */ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, @@ -399,7 +421,7 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, spin_lock_bh(&rx_ring->idr_lock); idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { idr_remove(&rx_ring->bufs_idr, buf_id); - /* XXX: Understand where internal driver does this dma_unmap of + /* XXX: Understand where internal driver does this dma_unmap * of rxdma_buffer. */ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr, @@ -960,7 +982,7 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, rx_tid->ba_win_sz = ba_win_sz; - /* TODO: Optimize the memory allocation for qos tid based on the + /* TODO: Optimize the memory allocation for qos tid based on * the actual BA window size in REO tid update path. 
*/ if (tid == HAL_DESC_REO_NON_QOS_TID) @@ -2715,7 +2737,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, paddr = dma_map_single(ab->dev, skb->data, skb->len + skb_tailroom(skb), - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ab->dev, paddr))) goto fail_free_skb; @@ -2731,7 +2753,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, fail_dma_unmap: dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); fail_free_skb: dev_kfree_skb_any(skb); fail_alloc_skb: @@ -2795,7 +2817,7 @@ fail_desc_get: idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); dev_kfree_skb_any(skb); ath11k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); @@ -2856,13 +2878,9 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, rxcb = ATH11K_SKB_RXCB(skb); - dma_sync_single_for_cpu(ab->dev, rxcb->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), - DMA_BIDIRECTIONAL); + DMA_FROM_DEVICE); tlv = (struct hal_tlv_hdr *)skb->data; if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != @@ -5030,3 +5048,29 @@ int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) ath11k_dp_mon_link_free(ar); return 0; } + +int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab) +{ + /* start reap timer */ + mod_timer(&ab->mon_reap_timer, + jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); + + return 0; +} + +int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer) +{ + int ret; + + if (stop_timer) + del_timer_sync(&ab->mon_reap_timer); + + /* reap all the monitor related rings */ + ret = ath11k_dp_purge_mon_ring(ab); + if (ret) { + ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h index fbea45f79c9b..bf399312b5ff 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.h +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h @@ -91,4 +91,7 @@ int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar); int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar); int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id); +int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab); +int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer); + #endif /* ATH11K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 3d962eee4d61..6a3fcea6c233 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -84,7 +84,6 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, struct ath11k_dp *dp = &ab->dp; struct hal_tx_info ti = {0}; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; @@ -105,14 +104,14 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1); - /* Let the default ring selection be based on a round robin - * fashion where one of the 3 tcl rings are selected based on - * the tcl_ring_selector counter. 
In case that ring + /* Let the default ring selection be based on current processor + * number, where one of the 3 tcl rings are selected based on + * the smp_processor_id(). In case that ring * is full/busy, we resort to other available rings. * If all rings are full, we drop the packet. * //TODO Add throttling logic when all rings are full */ - ring_selector = atomic_inc_return(&ab->tcl_ring_selector); + ring_selector = smp_processor_id(); tcl_ring_sel: tcl_ring_retry = false; @@ -149,9 +148,9 @@ tcl_ring_sel: ti.meta_data_flags = arvif->tcl_metadata; if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) { - if (key) { + if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) { ti.encrypt_type = - ath11k_dp_tx_get_encrypt_type(key->cipher); + ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); if (ieee80211_has_protected(hdr->frame_control)) skb_put(skb, IEEE80211_CCMP_MIC_LEN); diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h index 8a592814efa0..1b713cb13b90 100644 --- a/drivers/net/wireless/ath/ath11k/hal_desc.h +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h @@ -716,7 +716,7 @@ struct hal_reo_dest_ring { * * rx_msdu_info * General information related to the MSDU that is passed - * on from RXDMA all the way to to the REO destination ring. + * on from RXDMA all the way to the REO destination ring. * * queue_addr_lo * Address (lower 32 bits) of the REO queue descriptor. @@ -1425,7 +1425,7 @@ struct hal_ce_srng_dest_desc { #define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11) #define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16) -#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(7, 0) +#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0) #define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20) #define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT @@ -1946,7 +1946,7 @@ enum hal_rx_reo_queue_pn_size { #define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4) #define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10) -#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 10) +#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16) #define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0) #define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24) @@ -2432,7 +2432,7 @@ struct hal_reo_flush_timeout_list_status { #define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0) #define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0) #define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0) -#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(23, 0) +#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0) struct hal_reo_desc_thresh_reached_status { struct hal_reo_status_hdr hdr; diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index dbe5568916e8..6285c52afc44 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -17,6 +17,8 @@ struct ath11k_hif_ops { void (*stop)(struct ath11k_base *sc); int (*power_up)(struct ath11k_base *sc); void (*power_down)(struct ath11k_base *sc); + int (*suspend)(struct ath11k_base *ab); + int (*resume)(struct ath11k_base *ab); int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name, @@ -24,8 +26,22 @@ struct ath11k_hif_ops { u32 *base_vector); void (*get_msi_address)(struct ath11k_base *ab, u32 
*msi_addr_lo, u32 *msi_addr_hi); + void (*ce_irq_enable)(struct ath11k_base *ab); + void (*ce_irq_disable)(struct ath11k_base *ab); }; +static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab) +{ + if (ab->hif.ops->ce_irq_enable) + ab->hif.ops->ce_irq_enable(ab); +} + +static inline void ath11k_hif_ce_irq_disable(struct ath11k_base *ab) +{ + if (ab->hif.ops->ce_irq_disable) + ab->hif.ops->ce_irq_disable(ab); +} + static inline int ath11k_hif_start(struct ath11k_base *sc) { return sc->hif.ops->start(sc); @@ -56,6 +72,22 @@ static inline void ath11k_hif_power_down(struct ath11k_base *sc) sc->hif.ops->power_down(sc); } +static inline int ath11k_hif_suspend(struct ath11k_base *ab) +{ + if (ab->hif.ops->suspend) + return ab->hif.ops->suspend(ab); + + return 0; +} + +static inline int ath11k_hif_resume(struct ath11k_base *ab) +{ + if (ab->hif.ops->resume) + return ab->hif.ops->resume(ab); + + return 0; +} + static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address) { return sc->hif.ops->read32(sc, address); diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c index 6b57dc273e0b..54b1d34724d7 100644 --- a/drivers/net/wireless/ath/ath11k/htc.c +++ b/drivers/net/wireless/ath/ath11k/htc.c @@ -60,9 +60,11 @@ static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep, memset(hdr, 0, sizeof(*hdr)); hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) | FIELD_PREP(HTC_HDR_PAYLOADLEN, - (skb->len - sizeof(*hdr))) | - FIELD_PREP(HTC_HDR_FLAGS, - ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE); + (skb->len - sizeof(*hdr))); + + if (ep->tx_credit_flow_enabled) + hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS, + ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE); spin_lock_bh(&ep->htc->tx_lock); hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++); @@ -231,6 +233,18 @@ static int ath11k_htc_process_trailer(struct ath11k_htc *htc, return status; } +static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack) +{ + ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot suspend complete %d\n", ack); + + if (ack) + set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + else + clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + complete(&ab->htc_suspend); +} + void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, struct sk_buff *skb) { @@ -328,8 +342,17 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, complete(&htc->ctl_resp); break; + case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE: + ath11k_htc_suspend_complete(ab, true); + break; + case ATH11K_HTC_MSG_NACK_SUSPEND: + ath11k_htc_suspend_complete(ab, false); + break; + case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + break; default: - ath11k_warn(ab, "ignoring unsolicited htc ep0 event\n"); + ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n", + FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id)); break; } goto out; diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h index f0a3387567ca..6c8a469d7f9d 100644 --- a/drivers/net/wireless/ath/ath11k/htc.h +++ b/drivers/net/wireless/ath/ath11k/htc.h @@ -65,7 +65,9 @@ enum ath11k_htc_msg_id { ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3, ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4, ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5, - ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6 + ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6, + ATH11K_HTC_MSG_NACK_SUSPEND = 7, + ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8, }; enum ath11k_htc_version { @@ -221,10 +223,6 @@ enum ath11k_htc_ep_id { ATH11K_HTC_EP_COUNT, }; -struct ath11k_htc_ops { - 
void (*target_send_suspend_complete)(struct ath11k_base *ar); -}; - struct ath11k_htc_ep_ops { void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *); void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *); @@ -284,8 +282,6 @@ struct ath11k_htc { /* protects endpoints */ spinlock_t tx_lock; - struct ath11k_htc_ops htc_ops; - u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN]; int control_resp_len; diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c index 11a411b76fe4..66331da35012 100644 --- a/drivers/net/wireless/ath/ath11k/hw.c +++ b/drivers/net/wireless/ath/ath11k/hw.c @@ -127,7 +127,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab, config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD; config->rx_batchmode = TARGET_RX_BATCHMODE; config->peer_map_unmap_v2_support = 1; - config->twt_ap_pdev_count = 2; + config->twt_ap_pdev_count = ab->num_radios; config->twt_ap_sta_count = 1000; } @@ -157,7 +157,7 @@ static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw, const struct ath11k_hw_ops ipq8074_ops = { .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id, - .wmi_init_config = ath11k_init_wmi_config_qca6390, + .wmi_init_config = ath11k_init_wmi_config_ipq8074, .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074, .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074, }; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 1dda4257e6d7..8af0034fdb05 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -143,12 +143,6 @@ struct ath11k_hw_params { bool single_pdev_only; - /* For example on QCA6390 struct - * wmi_init_cmd_param::band_to_mac_config needs to be false as the - * firmware creates the mapping. 
- */ - bool needs_band_to_mac; - bool rxdma1_enable; int num_rxmda_per_pdev; bool rx_mac_buf_ring; @@ -161,6 +155,8 @@ struct ath11k_hw_params { bool supports_monitor; bool supports_shadow_regs; bool idle_ps; + bool cold_boot_calib; + bool supports_suspend; }; struct ath11k_hw_ops { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 7f8dd47d2333..5c175e3e09b2 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -537,31 +537,6 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id) return NULL; } -struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab, - u32 vdev_id) -{ - int i; - struct ath11k_pdev *pdev; - struct ath11k *ar; - - for (i = 0; i < ab->num_radios; i++) { - pdev = rcu_dereference(ab->pdevs_active[i]); - if (pdev && pdev->ar) { - ar = pdev->ar; - - spin_lock_bh(&ar->data_lock); - if (ar->vdev_stop_status.stop_in_progress && - ar->vdev_stop_status.vdev_id == vdev_id) { - ar->vdev_stop_status.stop_in_progress = false; - spin_unlock_bh(&ar->data_lock); - return ar; - } - spin_unlock_bh(&ar->data_lock); - } - } - return NULL; -} - static void ath11k_pdev_caps_update(struct ath11k *ar) { struct ath11k_base *ab = ar->ab; @@ -1850,6 +1825,52 @@ static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } +static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif, + struct ieee80211_bss_conf *info) +{ + struct ath11k *ar = arvif->ar; + struct sk_buff *tmpl; + int ret; + u32 interval; + bool unsol_bcast_probe_resp_enabled = false; + + if (info->fils_discovery.max_interval) { + interval = info->fils_discovery.max_interval; + + tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif); + if (tmpl) + ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, + tmpl); + } else if (info->unsol_bcast_probe_resp_interval) { + unsol_bcast_probe_resp_enabled = 1; + interval = info->unsol_bcast_probe_resp_interval; + + tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw, + arvif->vif); + if (tmpl) + ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, + tmpl); + } else { /* Disable */ + return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false); + } + + if (!tmpl) { + ath11k_warn(ar->ab, + "mac vdev %i failed to retrieve %s template\n", + arvif->vdev_id, (unsol_bcast_probe_resp_enabled ? 
+ "unsolicited broadcast probe response" : + "FILS discovery")); + return -EPERM; + } + kfree_skb(tmpl); + + if (!ret) + ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval, + unsol_bcast_probe_resp_enabled); + + return ret; +} + static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, @@ -1904,20 +1925,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (ret) ath11k_warn(ar->ab, "failed to update bcn template: %d\n", ret); - - if (vif->bss_conf.he_support) { - ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - WMI_VDEV_PARAM_BA_MODE, - WMI_BA_MODE_BUFFER_SIZE_256); - if (ret) - ath11k_warn(ar->ab, - "failed to set BA BUFFER SIZE 256 for vdev: %d\n", - arvif->vdev_id); - else - ath11k_dbg(ar->ab, ATH11K_DBG_MAC, - "Set BA BUFFER SIZE 256 for VDEV: %d\n", - arvif->vdev_id); - } } if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { @@ -1948,9 +1955,33 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ether_addr_copy(arvif->bssid, info->bssid); - if (changed & BSS_CHANGED_BEACON_ENABLED) + if (changed & BSS_CHANGED_BEACON_ENABLED) { ath11k_control_beaconing(arvif, info); + if (arvif->is_up && vif->bss_conf.he_support && + vif->bss_conf.he_oper.params) { + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + WMI_VDEV_PARAM_BA_MODE, + WMI_BA_MODE_BUFFER_SIZE_256); + if (ret) + ath11k_warn(ar->ab, + "failed to set BA BUFFER SIZE 256 for vdev: %d\n", + arvif->vdev_id); + + param_id = WMI_VDEV_PARAM_HEOPS_0_31; + param_value = vif->bss_conf.he_oper.params; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + ath11k_dbg(ar->ab, ATH11K_DBG_MAC, + "he oper param: %x set for VDEV: %d\n", + param_value, arvif->vdev_id); + + if (ret) + ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", + param_value, arvif->vdev_id, ret); + } + } + if (changed & BSS_CHANGED_ERP_CTS_PROT) { u32 cts_prot; @@ -2111,6 +2142,10 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, } } + if (changed & BSS_CHANGED_FILS_DISCOVERY || + changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) + ath11k_mac_fils_discovery(arvif, info); + mutex_unlock(&ar->conf_mutex); } @@ -3729,11 +3764,6 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, he_cap_elem->mac_cap_info[1] &= IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; - he_cap_elem->phy_cap_info[4] &= - ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK; - he_cap_elem->phy_cap_info[4] &= - ~IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK; - he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2; he_cap_elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; @@ -3977,21 +4007,20 @@ static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); - struct ieee80211_tx_info *info; + struct ath11k_skb_cb *skb_cb; struct ath11k_vif *arvif; struct sk_buff *skb; int ret; while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { - info = IEEE80211_SKB_CB(skb); - if (!info->control.vif) { - ath11k_warn(ar->ab, "no vif found for mgmt frame, flags 0x%x\n", - info->control.flags); + skb_cb = ATH11K_SKB_CB(skb); + if (!skb_cb->vif) { + ath11k_warn(ar->ab, "no vif found for mgmt frame\n"); ieee80211_free_txskb(ar->hw, skb); 
continue; } - arvif = ath11k_vif_to_arvif(info->control.vif); + arvif = ath11k_vif_to_arvif(skb_cb->vif); if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && arvif->is_started) { ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); @@ -4004,8 +4033,8 @@ static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) } } else { ath11k_warn(ar->ab, - "dropping mgmt frame for vdev %d, flags 0x%x is_started %d\n", - arvif->vdev_id, info->control.flags, + "dropping mgmt frame for vdev %d, is_started %d\n", + arvif->vdev_id, arvif->is_started); ieee80211_free_txskb(ar->hw, skb); } @@ -4053,10 +4082,20 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif = info->control.vif; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_key_conf *key = info->control.hw_key; + u32 info_flags = info->flags; bool is_prb_rsp; int ret; - if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + memset(skb_cb, 0, sizeof(*skb_cb)); + skb_cb->vif = vif; + + if (key) { + skb_cb->cipher = key->cipher; + skb_cb->flags |= ATH11K_SKB_CIPHER_SET; + } + + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP; } else if (ieee80211_is_mgmt(hdr->frame_control)) { is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); @@ -4094,7 +4133,8 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) if (enable) { tlv_filter = ath11k_mac_mon_status_filter_default; - tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); + if (ath11k_debugfs_rx_filter(ar)) + tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); } for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { @@ -4106,6 +4146,10 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) &tlv_filter); } + if (enable && !ar->ab->hw_params.rxdma1_enable) + mod_timer(&ar->ab->mon_reap_timer, jiffies + + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL)); + return ret; } @@ -4578,8 +4622,22 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, err_peer_del: if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + reinit_completion(&ar->peer_delete_done); + + ret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, + arvif->vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", + arvif->vdev_id, vif->addr); + goto err; + } + + ret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id, + vif->addr); + if (ret) + goto err; + ar->num_peers--; - ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id); } err_vdev_del: @@ -4614,6 +4672,7 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, struct ath11k *ar = hw->priv; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ath11k_base *ab = ar->ab; + unsigned long time_left; int ret; int i; @@ -4622,10 +4681,6 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, ath11k_dbg(ab, ATH11K_DBG_MAC, "mac remove interface (vdev %d)\n", arvif->vdev_id); - spin_lock_bh(&ar->data_lock); - list_del(&arvif->list); - spin_unlock_bh(&ar->data_lock); - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); if (ret) @@ -4633,16 +4688,33 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, arvif->vdev_id, ret); } + reinit_completion(&ar->vdev_delete_done); + ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id); - if (ret) + if (ret) { ath11k_warn(ab, "failed to delete WMI vdev %d: %d\n", arvif->vdev_id, ret); + goto 
err_vdev_del; + } + time_left = wait_for_completion_timeout(&ar->vdev_delete_done, + ATH11K_VDEV_DELETE_TIMEOUT_HZ); + if (time_left == 0) { + ath11k_warn(ab, "Timeout in receiving vdev delete response\n"); + goto err_vdev_del; + } + + ab->free_vdev_map |= 1LL << (arvif->vdev_id); + ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ar->num_created_vdevs--; + ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", vif->addr, arvif->vdev_id); - ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); - ab->free_vdev_map |= 1LL << (arvif->vdev_id); + +err_vdev_del: + spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); + spin_unlock_bh(&ar->data_lock); ath11k_peer_cleanup(ar, arvif->vdev_id); @@ -4946,13 +5018,6 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif) reinit_completion(&ar->vdev_setup_done); - spin_lock_bh(&ar->data_lock); - - ar->vdev_stop_status.stop_in_progress = true; - ar->vdev_stop_status.vdev_id = arvif->vdev_id; - - spin_unlock_bh(&ar->data_lock); - ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", @@ -4981,10 +5046,6 @@ static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif) return 0; err: - spin_lock_bh(&ar->data_lock); - ar->vdev_stop_status.stop_in_progress = false; - spin_unlock_bh(&ar->data_lock); - return ret; } @@ -5225,20 +5286,26 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, arvif->vdev_type != WMI_VDEV_TYPE_AP && arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); - mutex_unlock(&ar->conf_mutex); - return 0; + ret = 0; + goto out; } if (WARN_ON(arvif->is_started)) { - mutex_unlock(&ar->conf_mutex); - return -EBUSY; + ret = -EBUSY; + goto out; } if (ab->hw_params.vdev_start_delay) { param.vdev_id = arvif->vdev_id; param.peer_type = WMI_PEER_TYPE_DEFAULT; param.peer_addr = ar->mac_addr; + ret = ath11k_peer_create(ar, arvif, NULL, ¶m); + if (ret) { + ath11k_warn(ab, "failed to create peer after vdev start delay: %d", + ret); + goto out; + } } ret = ath11k_mac_vdev_start(arvif, &ctx->def); @@ -5246,23 +5313,21 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", arvif->vdev_id, vif->addr, ctx->def.chan->center_freq, ret); - goto err; + goto out; } if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id); if (ret) - goto err; + goto out; } arvif->is_started = true; /* TODO: Setup ps and cts/rts protection */ - mutex_unlock(&ar->conf_mutex); - - return 0; + ret = 0; -err: +out: mutex_unlock(&ar->conf_mutex); return ret; @@ -6258,6 +6323,13 @@ static int __ath11k_mac_register(struct ath11k *ar) ar->hw->wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath11k_iftypes_ext_capa); + if (ar->supports_6ghz) { + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_FILS_DISCOVERY); + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); + } + ath11k_reg_init(ar); if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) { @@ -6397,7 +6469,9 @@ int ath11k_mac_allocate(struct ath11k_base *ab) INIT_LIST_HEAD(&ar->ppdu_stats_info); mutex_init(&ar->conf_mutex); init_completion(&ar->vdev_setup_done); + init_completion(&ar->vdev_delete_done); init_completion(&ar->peer_assoc_done); + init_completion(&ar->peer_delete_done); init_completion(&ar->install_key_done); init_completion(&ar->bss_survey_done); init_completion(&ar->scan.started); diff --git a/drivers/net/wireless/ath/ath11k/mac.h 
b/drivers/net/wireless/ath/ath11k/mac.h index 0607479774a9..597104a9078d 100644 --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h @@ -137,8 +137,6 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, u32 vdev_id); struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id); struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id); -struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab, - u32 vdev_id); void ath11k_mac_drain_tx(struct ath11k *ar); void ath11k_mac_peer_cleanup_all(struct ath11k *ar); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index aded9a719d51..09858e516903 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -24,7 +24,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { .offload_channel = false, .doorbell_mode_switch = false, .auto_queue = false, - .auto_start = false, }, { .num = 1, @@ -39,7 +38,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { .offload_channel = false, .doorbell_mode_switch = false, .auto_queue = false, - .auto_start = false, }, { .num = 20, @@ -54,7 +52,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { .offload_channel = false, .doorbell_mode_switch = false, .auto_queue = false, - .auto_start = true, }, { .num = 21, @@ -69,7 +66,6 @@ static struct mhi_channel_config ath11k_mhi_channels[] = { .offload_channel = false, .doorbell_mode_switch = false, .auto_queue = true, - .auto_start = true, }, }; @@ -194,6 +190,15 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { + struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + + switch (cb) { + case MHI_CB_SYS_ERROR: + ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); + break; + default: + break; + } } static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl, @@ -218,7 +223,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) struct mhi_controller *mhi_ctrl; int ret; - mhi_ctrl = kzalloc(sizeof(*mhi_ctrl), GFP_KERNEL); + mhi_ctrl = mhi_alloc_controller(); if (!mhi_ctrl) return -ENOMEM; @@ -234,7 +239,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) ret = ath11k_mhi_get_msi(ab_pci); if (ret) { ath11k_err(ab, "failed to get msi for mhi\n"); - kfree(mhi_ctrl); + mhi_free_controller(mhi_ctrl); return ret; } @@ -252,7 +257,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config); if (ret) { ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret); - kfree(mhi_ctrl); + mhi_free_controller(mhi_ctrl); return ret; } @@ -265,6 +270,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci) mhi_unregister_controller(mhi_ctrl); kfree(mhi_ctrl->irq); + mhi_free_controller(mhi_ctrl); } static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state) @@ -413,8 +419,10 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, ret = 0; break; case ATH11K_MHI_SUSPEND: + ret = mhi_pm_suspend(ab_pci->mhi_ctrl); break; case ATH11K_MHI_RESUME: + ret = mhi_pm_resume(ab_pci->mhi_ctrl); break; case ATH11K_MHI_TRIGGER_RDDM: ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl); @@ -465,3 +473,12 @@ void ath11k_mhi_stop(struct ath11k_pci *ab_pci) ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT); } +void ath11k_mhi_suspend(struct ath11k_pci *ab_pci) +{ + 
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND); +} + +void ath11k_mhi_resume(struct ath11k_pci *ab_pci) +{ + ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME); +} diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index a7fd5e201d18..488dada5d31c 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -36,4 +36,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci); void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab); void ath11k_mhi_clear_vector(struct ath11k_base *ab); +void ath11k_mhi_suspend(struct ath11k_pci *ar_pci); +void ath11k_mhi_resume(struct ath11k_pci *ar_pci); + #endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index d7eb6b7160bb..857647aa57c8 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -126,6 +126,7 @@ static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offse if (window != ab_pci->register_window) { iowrite32(WINDOW_ENABLE_BIT | window, ab->mem + WINDOW_REG_ADDRESS); + ioread32(ab->mem + WINDOW_REG_ADDRESS); ab_pci->register_window = window; } } @@ -239,15 +240,137 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val); } +static int ath11k_pci_set_link_reg(struct ath11k_base *ab, + u32 offset, u32 value, u32 mask) +{ + u32 v; + int i; + + v = ath11k_pci_read32(ab, offset); + if ((v & mask) == value) + return 0; + + for (i = 0; i < 10; i++) { + ath11k_pci_write32(ab, offset, (v & ~mask) | value); + + v = ath11k_pci_read32(ab, offset); + if ((v & mask) == value) + return 0; + + mdelay(2); + } + + ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n", + offset, v & mask, value); + + return -ETIMEDOUT; +} + +static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_pci_set_link_reg(ab, + PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG, + PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, + PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); + if (ret) { + ath11k_warn(ab, "failed to set sysclk: %d\n", ret); + return ret; + } + + ret = ath11k_pci_set_link_reg(ab, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + if (ret) { + ath11k_warn(ab, "failed to set dtct config1: %d\n", ret); + return ret; + } + + ret = ath11k_pci_set_link_reg(ab, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + if (ret) { + ath11k_warn(ab, "failed to set dtct config2: %d\n", ret); + return ret; + } + + ret = ath11k_pci_set_link_reg(ab, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL, + PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK); + if (ret) { + ath11k_warn(ab, "failed to set dtct config4: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath11k_pci_enable_ltssm(struct ath11k_base *ab) +{ + u32 val; + int i; + + val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + + /* PCIE link seems very unstable after the Hot Reset */ + for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { + if (val == 0xffffffff) + mdelay(5); + + ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); + val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); + } + + ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val); + + val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + val |= GCC_GCC_PCIE_HOT_RST_VAL | 0x10; + 
ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); + val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); + + ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); + + mdelay(5); +} + +static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab) +{ + /* This is a WAR for PCIe Hot Reset. + * The target can come out of Hot Reset with a stale interrupt still + * set, so when SBL is downloaded again it enables interrupts, + * receives the stale interrupt and crashes immediately. + */ + ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); +} + +static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab) +{ + u32 val; + + val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); + val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; + ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); +} + static void ath11k_pci_force_wake(struct ath11k_base *ab) { ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); mdelay(5); } -static void ath11k_pci_sw_reset(struct ath11k_base *ab) +static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) { - ath11k_pci_soc_global_reset(ab); + if (power_on) { + ath11k_pci_enable_ltssm(ab); + ath11k_pci_clear_all_intrs(ab); + ath11k_pci_set_wlaon_pwr_ctrl(ab); + ath11k_pci_fix_l1ss(ab); + } + ath11k_mhi_clear_vector(ab); ath11k_pci_soc_global_reset(ab); ath11k_mhi_set_mhictrl_reset(ab); @@ -264,13 +387,18 @@ int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, u32 *msi_addr_hi) { + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct pci_dev *pci_dev = to_pci_dev(ab->dev); pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, msi_addr_lo); - pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, - msi_addr_hi); + if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { + pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, + msi_addr_hi); + } else { + *msi_addr_hi = 0; + } } int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, @@ -380,9 +508,9 @@ static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab) } } -static void ath11k_pci_ce_tasklet(unsigned long data) +static void ath11k_pci_ce_tasklet(struct tasklet_struct *t) { - struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data; + struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); @@ -581,8 +709,7 @@ static int ath11k_pci_config_irq(struct ath11k_base *ab) irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; - tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet, - (unsigned long)ce_pipe); + tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); ret = request_irq(irq, ath11k_pci_ce_interrupt_handler, IRQF_SHARED, irq_name[irq_idx], @@ -659,6 +786,8 @@ static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) } ab_pci->msi_ep_base_data = msi_desc->msg.data; + if (msi_desc->msi_attrib.is_64) + set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); @@ -764,7 +893,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) ab_pci->register_window = 0; clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - ath11k_pci_sw_reset(ab_pci->ab); + ath11k_pci_sw_reset(ab_pci->ab, true); ret = ath11k_mhi_start(ab_pci); if (ret) { @@ -779,10 +908,28 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + ath11k_pci_force_wake(ab_pci->ab); 
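	/* An explanatory sketch, not a line of the patch itself: the
	 * force-wake above is presumably needed first so that the MHI stop
	 * and the soft reset that follow are issued against an awake SoC
	 * register space; per ath11k_pci_force_wake() it amounts to a single
	 * PCIE_SOC_WAKE_PCIE_LOCAL_REG write plus an mdelay(5) settle.
	 */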
ath11k_mhi_stop(ab_pci); clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); - ath11k_pci_force_wake(ab_pci->ab); - ath11k_pci_sw_reset(ab_pci->ab); + ath11k_pci_sw_reset(ab_pci->ab, false); +} + +static int ath11k_pci_hif_suspend(struct ath11k_base *ab) +{ + struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); + + ath11k_mhi_suspend(ar_pci); + + return 0; +} + +static int ath11k_pci_hif_resume(struct ath11k_base *ab) +{ + struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); + + ath11k_mhi_resume(ar_pci); + + return 0; } static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) @@ -799,11 +946,16 @@ static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) } } -static void ath11k_pci_stop(struct ath11k_base *ab) +static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab) { ath11k_pci_ce_irqs_disable(ab); ath11k_pci_sync_ce_irqs(ab); ath11k_pci_kill_tasklets(ab); +} + +static void ath11k_pci_stop(struct ath11k_base *ab) +{ + ath11k_pci_ce_irq_disable_sync(ab); ath11k_ce_cleanup_pipes(ab); } @@ -819,6 +971,16 @@ static int ath11k_pci_start(struct ath11k_base *ab) return 0; } +static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab) +{ + ath11k_pci_ce_irqs_enable(ab); +} + +static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) +{ + ath11k_pci_ce_irq_disable_sync(ab); +} + static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { @@ -869,11 +1031,15 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .write32 = ath11k_pci_write32, .power_down = ath11k_pci_power_down, .power_up = ath11k_pci_power_up, + .suspend = ath11k_pci_hif_suspend, + .resume = ath11k_pci_hif_resume, .irq_enable = ath11k_pci_ext_irq_enable, .irq_disable = ath11k_pci_ext_irq_disable, .get_msi_address = ath11k_pci_get_msi_address, .get_user_msi_vector = ath11k_get_user_msi_assignment, .map_service_to_pipe = ath11k_pci_map_service_to_pipe, + .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, + .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, }; static int ath11k_pci_probe(struct pci_dev *pdev, @@ -1008,10 +1174,18 @@ static void ath11k_pci_remove(struct pci_dev *pdev) struct ath11k_base *ab = pci_get_drvdata(pdev); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath11k_pci_power_down(ab); + ath11k_debugfs_soc_destroy(ab); + ath11k_qmi_deinit_service(ab); + goto qmi_fail; + } + set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); ath11k_core_deinit(ab); +qmi_fail: ath11k_mhi_unregister(ab_pci); ath11k_pci_free_irq(ab); @@ -1030,12 +1204,43 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev) ath11k_pci_power_down(ab); } +static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) +{ + struct ath11k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath11k_core_suspend(ab); + if (ret) + ath11k_warn(ab, "failed to suspend core: %d\n", ret); + + return ret; +} + +static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) +{ + struct ath11k_base *ab = dev_get_drvdata(dev); + int ret; + + ret = ath11k_core_resume(ab); + if (ret) + ath11k_warn(ab, "failed to resume core: %d\n", ret); + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops, + ath11k_pci_pm_suspend, + ath11k_pci_pm_resume); + static struct pci_driver ath11k_pci_driver = { .name = "ath11k_pci", .id_table = ath11k_pci_id_table, .probe = ath11k_pci_probe, .remove = ath11k_pci_remove, .shutdown = ath11k_pci_shutdown, +#ifdef CONFIG_PM + .driver.pm = &ath11k_pci_pm_ops, +#endif }; static int 
ath11k_pci_init(void) @@ -1060,3 +1265,8 @@ module_exit(ath11k_pci_exit); MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); + +/* QCA639x 2.0 firmware files */ +MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_BOARD_API2_FILE); +MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_AMSS_FILE); +MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_M3_FILE); diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h index 43562f774a37..0432a702416b 100644 --- a/drivers/net/wireless/ath/ath11k/pci.h +++ b/drivers/net/wireless/ath/ath11k/pci.h @@ -24,6 +24,30 @@ /* register used for handshake mechanism to validate UMAC is awake */ #define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004 +#define PCIE_PCIE_PARF_LTSSM 0x1e081b0 +#define PARM_LTSSM_VALUE 0x111 + +#define GCC_GCC_PCIE_HOT_RST 0x1e402bc +#define GCC_GCC_PCIE_HOT_RST_VAL 0x10 + +#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228 +#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2 +#define PCIE_INT_CLEAR_ALL 0xffffffff + +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG 0x01e0c0ac +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10 +#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG 0x01e0c628 +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL 0x02 +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG 0x01e0c62c +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL 0x52 +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG 0x01e0c634 +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL 0xff +#define PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK 0x000000ff + +#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c +#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 + struct ath11k_msi_user { char *name; int num_vectors; @@ -38,6 +62,7 @@ struct ath11k_msi_config { enum ath11k_pci_flags { ATH11K_PCI_FLAG_INIT_DONE, + ATH11K_PCI_FLAG_IS_MSI_64, }; struct ath11k_pci { diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c index 61ad9300eafb..1866d82678fa 100644 --- a/drivers/net/wireless/ath/ath11k/peer.c +++ b/drivers/net/wireless/ath/ath11k/peer.c @@ -177,12 +177,36 @@ static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false); } +int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, + const u8 *addr) +{ + int ret; + unsigned long time_left; + + ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) { + ath11k_warn(ar->ab, "failed to wait for peer to be deleted\n"); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->peer_delete_done, + 3 * HZ); + if (time_left == 0) { + ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n"); + return -ETIMEDOUT; + } + + return 0; +} + int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) { int ret; lockdep_assert_held(&ar->conf_mutex); + reinit_completion(&ar->peer_delete_done); + ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); if (ret) { ath11k_warn(ar->ab, @@ -191,7 +215,7 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) return ret; } - ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr); + ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr); if (ret) return ret; @@ -247,8 +271,22 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, spin_unlock_bh(&ar->ab->base_lock); ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n", param->peer_addr, param->vdev_id); - 
ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr, - param->vdev_id); + + reinit_completion(&ar->peer_delete_done); + + ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr, + param->vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", + param->vdev_id, param->peer_addr); + return ret; + } + + ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id, + param->peer_addr); + if (ret) + return ret; + return -ENOENT; } diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h index 5d125ce8984e..bba2e00b6944 100644 --- a/drivers/net/wireless/ath/ath11k/peer.h +++ b/drivers/net/wireless/ath/ath11k/peer.h @@ -41,5 +41,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id); int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr); int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, struct ieee80211_sta *sta, struct peer_create_params *param); +int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, + const u8 *addr); #endif /* _PEER_H_ */ diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index c2b165158225..f0b5c50974f3 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -14,6 +14,12 @@ #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define HOST_CSTATE_BIT 0x04 +bool ath11k_cold_boot_cal = 1; +EXPORT_SYMBOL(ath11k_cold_boot_cal); +module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644); +MODULE_PARM_DESC(cold_boot_cal, + "Decrease the channel switch time but increase the driver load time (Default: true)"); + static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { { .data_type = QMI_OPT_FLAG, @@ -1585,15 +1591,17 @@ static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; struct qmi_handle *handle = &ab->qmi.handle; struct qmi_txn txn; - int ret = 0; + int ret; req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; resp = kzalloc(sizeof(*resp), GFP_KERNEL); - if (!resp) + if (!resp) { + ret = -ENOMEM; goto resp_out; + } req->client_id_valid = 1; req->client_id = QMI_WLANFW_CLIENT_ID; @@ -1769,9 +1777,16 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) ath11k_warn(ab, "qmi mem size is low to load caldata\n"); return -EINVAL; } - /* TODO ath11k does not support cold boot calibration */ - ab->qmi.target_mem[idx].paddr = 0; - ab->qmi.target_mem[idx].vaddr = NULL; + + if (ath11k_cold_boot_cal && ab->hw_params.cold_boot_calib) { + ab->qmi.target_mem[idx].paddr = + ATH11K_QMI_CALDB_ADDRESS; + ab->qmi.target_mem[idx].vaddr = + (void *)ATH11K_QMI_CALDB_ADDRESS; + } else { + ab->qmi.target_mem[idx].paddr = 0; + ab->qmi.target_mem[idx].vaddr = NULL; + } ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; @@ -1793,6 +1808,7 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) struct qmi_wlanfw_cap_resp_msg_v01 resp; struct qmi_txn txn = {}; int ret = 0; + int r; memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); @@ -1858,6 +1874,10 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ab->qmi.target.fw_build_timestamp, ab->qmi.target.fw_build_id); + r = ath11k_core_check_dt(ab); + if (r) + ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n"); + out: return ret; } @@ -2352,6 +2372,32 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, return 0; } +static 
int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) +{ + int timeout; + int ret; + + ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret); + return ret; + } + + ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n"); + + timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, + (ab->qmi.cal_done == 1), + ATH11K_COLD_BOOT_FW_RESET_DELAY); + if (timeout <= 0) { + ath11k_warn(ab, "Coldboot Calibration failed - wait ended\n"); + return 0; + } + + ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n"); + + return 0; +} + static int ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, enum ath11k_qmi_event_type type, @@ -2375,7 +2421,7 @@ ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, return 0; } -static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) +static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; @@ -2383,17 +2429,19 @@ static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) ret = ath11k_qmi_fw_ind_register_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret); - return; + return ret; } ret = ath11k_qmi_host_cap_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret); - return; + return ret; } + + return ret; } -static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) +static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; @@ -2401,11 +2449,13 @@ static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) ret = ath11k_qmi_respond_fw_mem_request(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret); - return; + return ret; } + + return ret; } -static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) +static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) { struct ath11k_base *ab = qmi->ab; int ret; @@ -2413,7 +2463,7 @@ static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) ret = ath11k_qmi_request_target_cap(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to req target capabilities:%d\n", ret); - return; + return ret; } if (ab->bus_params.fixed_bdf_addr) @@ -2422,14 +2472,16 @@ static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) ret = ath11k_qmi_load_bdf_qmi(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret); - return; + return ret; } ret = ath11k_qmi_wlanfw_m3_info_send(ab); if (ret < 0) { ath11k_warn(ab, "qmi failed to send m3 info req:%d\n", ret); - return; + return ret; } + + return ret; } static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, @@ -2501,11 +2553,18 @@ static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl, ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL); } -static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi, +static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl, struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded) { + struct ath11k_qmi *qmi = container_of(qmi_hdl, + struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + + ab->qmi.cal_done = 1; + wake_up(&ab->qmi.cold_boot_waitq); + ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cold boot calibration done\n"); } static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = { @@ -2562,7 +2621,7 @@ static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, ath11k_dbg(ab, 
ATH11K_DBG_QMI, "qmi wifi fw qmi service connected\n"); ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL); - return 0; + return ret; } static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl, @@ -2586,6 +2645,7 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) event_work); struct ath11k_qmi_driver_event *event; struct ath11k_base *ab = qmi->ab; + int ret; spin_lock(&qmi->event_lock); while (!list_empty(&qmi->event_list)) { @@ -2599,28 +2659,42 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work) switch (event->type) { case ATH11K_QMI_EVENT_SERVER_ARRIVE: - ath11k_qmi_event_server_arrive(qmi); + ret = ath11k_qmi_event_server_arrive(qmi); + if (ret < 0) + set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case ATH11K_QMI_EVENT_SERVER_EXIT: set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags); set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); break; case ATH11K_QMI_EVENT_REQUEST_MEM: - ath11k_qmi_event_mem_request(qmi); + ret = ath11k_qmi_event_mem_request(qmi); + if (ret < 0) + set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case ATH11K_QMI_EVENT_FW_MEM_READY: - ath11k_qmi_event_load_bdf(qmi); + ret = ath11k_qmi_event_load_bdf(qmi); + if (ret < 0) + set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); break; case ATH11K_QMI_EVENT_FW_READY: + clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags); if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) { ath11k_hal_dump_srng_stats(ab); queue_work(ab->workqueue, &ab->restart_work); break; } - ath11k_core_qmi_firmware_ready(ab); - ab->qmi.cal_done = 1; - set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); + if (ath11k_cold_boot_cal && ab->qmi.cal_done == 0 && + ab->hw_params.cold_boot_calib) { + ath11k_qmi_process_coldboot_calibration(ab); + } else { + clear_bit(ATH11K_FLAG_CRASH_FLUSH, + &ab->dev_flags); + clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags); + ath11k_core_qmi_firmware_ready(ab); + set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags); + } break; case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE: @@ -2682,4 +2756,5 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab) ath11k_qmi_m3_free(ab); ath11k_qmi_free_target_mem_chunk(ab); } +EXPORT_SYMBOL(ath11k_qmi_deinit_service); diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index b0a818f0401b..92925c9eac67 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -12,6 +12,7 @@ #define ATH11K_HOST_VERSION_STRING "WIN" #define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000 #define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64 +#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000 #define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024) #define ATH11K_QMI_CALDATA_OFFSET (128 * 1024) #define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128 @@ -24,6 +25,7 @@ #define ATH11K_QMI_RESP_LEN_MAX 8192 #define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32 #define ATH11K_QMI_CALDB_SIZE 0x480000 +#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 @@ -33,6 +35,7 @@ #define QMI_WLANFW_MAX_DATA_SIZE_V01 6144 #define ATH11K_FIRMWARE_MODE_OFF 4 #define ATH11K_QMI_TARGET_MEM_MODE_DEFAULT 0 +#define ATH11K_COLD_BOOT_FW_RESET_DELAY (40 * HZ) struct ath11k_base; @@ -101,6 +104,7 @@ struct target_info { u32 fw_version; char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1]; char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1]; + char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH]; }; struct m3_mem_region { @@ -125,6 +129,7 @@ struct ath11k_qmi { 
struct target_info target; struct m3_mem_region m3_mem; unsigned int service_ins_id; + wait_queue_head_t cold_boot_waitq; }; #define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189 diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index f6a1f0352989..b876fec7fa1b 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -80,6 +80,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) */ init_country_param.flags = ALPHA_IS_SET; memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2); + init_country_param.cc_info.alpha2[2] = 0; ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param); if (ret) @@ -277,6 +278,7 @@ ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region) case ATH11K_DFS_REG_KR: return NL80211_DFS_ETSI; case ATH11K_DFS_REG_MKK: + case ATH11K_DFS_REG_MKK_N: return NL80211_DFS_JP; default: return NL80211_DFS_UNSET; @@ -584,7 +586,6 @@ ath11k_reg_build_regd(struct ath11k_base *ab, if (!tmp_regd) goto ret; - tmp_regd->n_reg_rules = num_rules; memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1); alpha2[2] = '\0'; @@ -597,7 +598,7 @@ ath11k_reg_build_regd(struct ath11k_base *ab, /* Update reg_rules[] below. Firmware is expected to * send these rules in order(2G rules first and then 5G) */ - for (; i < tmp_regd->n_reg_rules; i++) { + for (; i < num_rules; i++) { if (reg_info->num_2g_reg_rules && (i < reg_info->num_2g_reg_rules)) { reg_rule = reg_info->reg_rules_2g_ptr + i; @@ -652,6 +653,8 @@ ath11k_reg_build_regd(struct ath11k_base *ab, flags); } + tmp_regd->n_reg_rules = i; + if (intersect) { default_regd = ab->default_regd[reg_info->phy_id]; diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 39b7fc943541..65d56d44796f 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -20,6 +20,7 @@ enum ath11k_dfs_region { ATH11K_DFS_REG_MKK, ATH11K_DFS_REG_CN, ATH11K_DFS_REG_KR, + ATH11K_DFS_REG_MKK_N, ATH11K_DFS_REG_UNDEF, }; diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h index 1c4264637a41..86494da1069a 100644 --- a/drivers/net/wireless/ath/ath11k/rx_desc.h +++ b/drivers/net/wireless/ath/ath11k/rx_desc.h @@ -170,7 +170,7 @@ struct rx_attention { * * ast_index_not_found * Only valid when first_msdu is set. Indicates no AST matching - * entries within the the max search count. + * entries within the max search count. * * ast_index_timeout * Only valid when first_msdu is set. 
Indicates an unsuccessful diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c index d2dc9db01491..4bf1931adbaa 100644 --- a/drivers/net/wireless/ath/ath11k/testmode.c +++ b/drivers/net/wireless/ath/ath11k/testmode.c @@ -51,7 +51,7 @@ bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb) ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI); if (ret) { ath11k_warn(ar->ab, - "failed to to put testmode wmi event cmd attribute: %d\n", + "failed to put testmode wmi event cmd attribute: %d\n", ret); kfree_skb(nl_skb); goto out; } @@ -60,7 +60,7 @@ bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb) ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id); if (ret) { ath11k_warn(ar->ab, - "failed to to put testmode wmi even cmd_id: %d\n", + "failed to put testmode wmi event cmd_id: %d\n", ret); kfree_skb(nl_skb); goto out; } diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 8eca92520837..da4b546b62cb 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -122,6 +122,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = { = { .min_len = sizeof(struct wmi_stats_event) }, [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, + [WMI_TAG_HOST_SWFDA_EVENT] = { + .min_len = sizeof(struct wmi_fils_discovery_event) }, + [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = { + .min_len = sizeof(struct wmi_probe_resp_tx_status_event) }, + [WMI_TAG_VDEV_DELETE_RESP_EVENT] = { + .min_len = sizeof(struct wmi_vdev_delete_resp_event) }, }; #define PRIMAP(_hw_mode_) \ @@ -362,7 +368,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, * For example, for 4x4 capable macphys, first 4 chains can be used for first * mac and the remaining 4 chains can be used for the second mac or vice-versa. * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 - * will be advertised for second mac or vice-versa. Compute the shift value for + * will be advertised for second mac or vice-versa. Compute the shift value * for tx/rx chainmask which will be used to advertise supported ht/vht rates to * mac80211. 
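 *
 * As a worked illustration (assuming the shift is derived with
 * find_first_bit() on the advertised mask, which is the usual way to
 * obtain it): for the second mac advertising tx/rx chainmask 0xf0,
 * find_first_bit() yields 4, and the supported rates are then built
 * from (0xf0 >> 4) & 0xf, i.e. all four chains.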
*/ @@ -1682,7 +1688,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar, static inline void ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, - struct peer_assoc_params *param) + struct peer_assoc_params *param, + bool hw_crypto_disabled) { cmd->peer_flags = 0; @@ -1736,7 +1743,8 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, cmd->peer_flags |= WMI_PEER_AUTH; if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; - cmd->peer_flags &= ~WMI_PEER_AUTH; + if (!hw_crypto_disabled) + cmd->peer_flags &= ~WMI_PEER_AUTH; } if (param->need_gtk_2_way) cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; @@ -1803,7 +1811,9 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, cmd->peer_new_assoc = param->peer_new_assoc; cmd->peer_associd = param->peer_associd; - ath11k_wmi_copy_peer_flags(cmd, param); + ath11k_wmi_copy_peer_flags(cmd, param, + test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, + &ar->ab->dev_flags)); ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac); @@ -1946,6 +1956,11 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, WMI_SCAN_EVENT_DEQUEUED; arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT; arg->num_bssid = 1; + + /* fill bssid_list[0] with the broadcast address, otherwise the + * bssid and RA will be all zeros in probe requests + */ + eth_broadcast_addr(arg->bssid_list[0].addr); } static inline void @@ -2198,37 +2213,6 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, } } - len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); - tlv = ptr; - tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | - FIELD_PREP(WMI_TLV_LEN, len); - ptr += TLV_HDR_SIZE; - if (params->num_hint_s_ssid) { - s_ssid = ptr; - for (i = 0; i < params->num_hint_s_ssid; ++i) { - s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; - s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; - s_ssid++; - } - } - ptr += len; - - len = params->num_hint_bssid * sizeof(struct hint_bssid); - tlv = ptr; - tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | - FIELD_PREP(WMI_TLV_LEN, len); - ptr += TLV_HDR_SIZE; - if (params->num_hint_bssid) { - hint_bssid = ptr; - for (i = 0; i < params->num_hint_bssid; ++i) { - hint_bssid->freq_flags = - params->hint_bssid[i].freq_flags; - ether_addr_copy(&params->hint_bssid[i].bssid.addr[0], - &hint_bssid->bssid.addr[0]); - hint_bssid++; - } - } - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID); if (ret) { @@ -3064,6 +3048,137 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, return ret; } +int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, + struct sk_buff *tmpl) +{ + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len; + struct wmi_fils_discovery_tmpl_cmd *cmd; + + aligned_len = roundup(tmpl->len, 4); + len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI vdev %i set FILS discovery template\n", vdev_id); + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_FILS_DISCOVERY_TMPL_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->buf_len = tmpl->len; + ptr = skb->data + sizeof(*cmd); + + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, aligned_len); + memcpy(tlv->value, tmpl->data, tmpl->len); + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, 
WMI_FILS_DISCOVERY_TMPL_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "WMI vdev %i failed to send FILS discovery template command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + +int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, + struct sk_buff *tmpl) +{ + struct wmi_probe_tmpl_cmd *cmd; + struct wmi_bcn_prb_info *probe_info; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len = roundup(tmpl->len, 4); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI vdev %i set probe response template\n", vdev_id); + + len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_probe_tmpl_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->buf_len = tmpl->len; + + ptr = skb->data + sizeof(*cmd); + + probe_info = ptr; + len = sizeof(*probe_info); + probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_BCN_PRB_INFO) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + probe_info->caps = 0; + probe_info->erp = 0; + + ptr += sizeof(*probe_info); + + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) | + FIELD_PREP(WMI_TLV_LEN, aligned_len); + memcpy(tlv->value, tmpl->data, tmpl->len); + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "WMI vdev %i failed to send probe response template command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + +int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, + bool unsol_bcast_probe_resp_enabled) +{ + struct sk_buff *skb; + int ret, len; + struct wmi_fils_discovery_cmd *cmd; + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI vdev %i set %s interval to %u TU\n", + vdev_id, unsol_bcast_probe_resp_enabled ? 
+ "unsolicited broadcast probe response" : "FILS discovery", + interval); + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_fils_discovery_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + cmd->vdev_id = vdev_id; + cmd->interval = interval; + cmd->config = unsol_bcast_probe_resp_enabled; + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "WMI vdev %i failed to send FILS discovery enable/disable command\n", + vdev_id); + dev_kfree_skb(skb); + } + return ret; +} + static void ath11k_fill_band_to_mac_param(struct ath11k_base *soc, struct wmi_host_pdev_band_to_mac *band_to_mac) @@ -3333,6 +3448,35 @@ int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab) return 0; } +int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, + enum wmi_host_hw_mode_config_type mode) +{ + struct wmi_pdev_set_hw_mode_cmd_param *cmd; + struct sk_buff *skb; + struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab; + int len; + int ret; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi_ab, len); + cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data; + + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->pdev_id = WMI_PDEV_ID_SOC; + cmd->hw_mode_index = mode; + + ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID); + if (ret) { + ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n"); + dev_kfree_skb(skb); + } + + return ret; +} + int ath11k_wmi_cmd_init(struct ath11k_base *ab) { struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab; @@ -3351,13 +3495,11 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) init_param.hw_mode_id = wmi_sc->preferred_hw_mode; init_param.mem_chunks = wmi_sc->mem_chunks; - if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE) + if (ab->hw_params.single_pdev_only) init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX; - if (ab->hw_params.needs_band_to_mac) { - init_param.num_band_to_mac = ab->num_radios; - ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); - } + init_param.num_band_to_mac = ab->num_radios; + ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); } @@ -4242,6 +4384,34 @@ static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff * return 0; } +static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab, + struct sk_buff *skb, + u32 *vdev_id) +{ + const void **tb; + const struct wmi_vdev_delete_resp_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, "failed to parse tlv: %d\n", ret); + return ret; + } + + ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch vdev delete resp ev"); + kfree(tb); + return -EPROTO; + } + + *vdev_id = ev->vdev_id; + + kfree(tb); + return 0; +} + static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf, u32 len, u32 *vdev_id, u32 *tx_status) @@ -5563,15 +5733,54 @@ static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_peer_delete_resp_event peer_del_resp; + struct ath11k *ar; if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { ath11k_warn(ab, 
"failed to extract peer delete resp"); return; } - /* TODO: Do we need to validate whether ath11k_peer_find() return NULL - * Why this is needed when there is HTT event for peer delete - */ + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d", + peer_del_resp.vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->peer_delete_done); + rcu_read_unlock(); + ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n", + peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr); +} + +static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k *ar; + u32 vdev_id = 0; + + if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) { + ath11k_warn(ab, "failed to extract vdev delete resp"); + return; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d", + vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->vdev_delete_done); + + rcu_read_unlock(); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n", + vdev_id); } static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status) @@ -5650,7 +5859,7 @@ static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *sk } rcu_read_lock(); - ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id); + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id); if (!ar) { ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d", vdev_id); @@ -6429,6 +6638,121 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab, ath11k_thermal_event_temperature(ar, ev.temp); } +static void ath11k_fils_discovery_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_fils_discovery_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, + "failed to parse FILS discovery event tlv %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_HOST_SWFDA_EVENT]; + if (!ev) { + ath11k_warn(ab, "failed to fetch FILS discovery event\n"); + kfree(tb); + return; + } + + ath11k_warn(ab, + "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n", + ev->vdev_id, ev->fils_tt, ev->tbtt); + + kfree(tb); +} + +static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_probe_resp_tx_status_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath11k_warn(ab, + "failed to parse probe response transmission status event tlv: %d\n", + ret); + return; + } + + ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT]; + if (!ev) { + ath11k_warn(ab, + "failed to fetch probe response transmission status event"); + kfree(tb); + return; + } + + if (ev->tx_status) + ath11k_warn(ab, + "Probe response transmission failed for vdev_id %u, status %u\n", + ev->vdev_id, ev->tx_status); + + kfree(tb); +} + +static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_wow_ev_arg *ev = data; + const char *wow_pg_fault; + int wow_pg_len; + + switch (tag) { + case WMI_TAG_WOW_EVENT_INFO: + memcpy(ev, ptr, sizeof(*ev)); + ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n", + ev->wake_reason, 
wow_reason(ev->wake_reason)); + break; + + case WMI_TAG_ARRAY_BYTE: + if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) { + wow_pg_fault = ptr; + /* the first 4 bytes are length */ + wow_pg_len = *(int *)wow_pg_fault; + wow_pg_fault += sizeof(int); + ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n", + wow_pg_len); + ath11k_dbg_dump(ab, ATH11K_DBG_WMI, + "wow_event_info_type packet present", + "wow_pg_fault ", + wow_pg_fault, + wow_pg_len); + } + break; + default: + break; + } + + return 0; +} + +static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_wow_ev_arg ev = { }; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_wow_wakeup_host_parse, + &ev); + if (ret) { + ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret); + return; + } + + complete(&ab->wow.wakeup_completed); +} + static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -6515,9 +6839,14 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb); break; + case WMI_HOST_FILS_DISCOVERY_EVENTID: + ath11k_fils_discovery_event(ab, skb); + break; + case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID: + ath11k_probe_resp_tx_status_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: - case WMI_VDEV_DELETE_RESP_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: case WMI_TWT_ENABLE_EVENTID: case WMI_TWT_DISABLE_EVENTID: @@ -6528,6 +6857,12 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID: ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb); break; + case WMI_VDEV_DELETE_RESP_EVENTID: + ath11k_vdev_delete_resp_event(ab, skb); + break; + case WMI_WOW_WAKEUP_HOST_EVENTID: + ath11k_wmi_event_wow_wakeup_host(ab, skb); + break; /* TODO: Add remaining events */ default: ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ -6742,3 +7077,46 @@ void ath11k_wmi_detach(struct ath11k_base *ab) ath11k_wmi_free_dbring_caps(ab); } + +int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) +{ + struct wmi_wow_host_wakeup_ind *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_host_wakeup_ind *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); +} + +int ath11k_wmi_wow_enable(struct ath11k *ar) +{ + struct wmi_wow_enable_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_enable_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->enable = 1; + cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow enable\n"); + + return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 5a32ba0eb4f5..993674228c9e 100644 --- 
a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -319,6 +319,7 @@ enum wmi_tlv_cmd_id { WMI_BCN_OFFLOAD_CTRL_CMDID, WMI_BSS_COLOR_CHANGE_ENABLE_CMDID, WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID, + WMI_FILS_DISCOVERY_TMPL_CMDID, WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG), WMI_ADDBA_SEND_CMDID, WMI_ADDBA_STATUS_CMDID, @@ -351,6 +352,8 @@ enum wmi_tlv_cmd_id { WMI_ROAM_CONFIGURE_MAWC_CMDID, WMI_ROAM_SET_MBO_PARAM_CMDID, WMI_ROAM_PER_CONFIG_CMDID, + WMI_ROAM_BTM_CONFIG_CMDID, + WMI_ENABLE_FILS_CMDID, WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN), WMI_OFL_SCAN_REMOVE_AP_PROFILE, WMI_OFL_SCAN_PERIOD, @@ -642,6 +645,8 @@ enum wmi_tlv_event_id { WMI_MGMT_TX_COMPLETION_EVENTID, WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID, WMI_TBTTOFFSET_EXT_UPDATE_EVENTID, + WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID, + WMI_HOST_FILS_DISCOVERY_EVENTID, WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG), WMI_TX_ADDBA_COMPLETE_EVENTID, WMI_BA_RSP_SSN_EVENTID, @@ -1032,7 +1037,7 @@ enum wmi_tlv_vdev_param { WMI_VDEV_PARAM_PROTOTYPE = 0x8000, WMI_VDEV_PARAM_BSS_COLOR, WMI_VDEV_PARAM_SET_HEMU_MODE, - WMI_VDEV_PARAM_TX_OFDMA_CPLEN, + WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003, }; enum wmi_tlv_peer_flags { @@ -1810,6 +1815,7 @@ enum wmi_tlv_tag { /* TODO add all the missing cmds */ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO, + WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344, WMI_TAG_MAX }; @@ -3348,7 +3354,6 @@ struct wmi_mgmt_params { void *pdata; u16 desc_id; u8 *macaddr; - void *qdf_ctx; }; enum wmi_sta_ps_mode { @@ -4013,6 +4018,10 @@ struct wmi_regulatory_rule_struct { u32 flag_info; }; +struct wmi_vdev_delete_resp_event { + u32 vdev_id; +} __packed; + struct wmi_peer_delete_resp_event { u32 vdev_id; struct wmi_mac_addr peer_macaddr; @@ -4076,6 +4085,17 @@ struct wmi_peer_assoc_conf_arg { const u8 *macaddr; }; +struct wmi_fils_discovery_event { + u32 vdev_id; + u32 fils_tt; + u32 tbtt; +} __packed; + +struct wmi_probe_resp_tx_status_event { + u32 vdev_id; + u32 tx_status; +} __packed; + /* * PDEV statistics */ @@ -4908,6 +4928,30 @@ struct wmi_dma_buf_release_meta_data { u32 ch_width; } __packed; +enum wmi_fils_discovery_cmd_type { + WMI_FILS_DISCOVERY_CMD, + WMI_UNSOL_BCAST_PROBE_RESP, +}; + +struct wmi_fils_discovery_cmd { + u32 tlv_header; + u32 vdev_id; + u32 interval; + u32 config; /* enum wmi_fils_discovery_cmd_type */ +} __packed; + +struct wmi_fils_discovery_tmpl_cmd { + u32 tlv_header; + u32 vdev_id; + u32 buf_len; +} __packed; + +struct wmi_probe_tmpl_cmd { + u32 tlv_header; + u32 vdev_id; + u32 buf_len; +} __packed; + struct target_resource_config { u32 num_vdevs; u32 num_peers; @@ -5000,6 +5044,169 @@ struct ath11k_wmi_base { struct ath11k_targ_cap *targ_cap; }; +/* WOW structures */ +enum wmi_wow_wakeup_event { + WOW_BMISS_EVENT = 0, + WOW_BETTER_AP_EVENT, + WOW_DEAUTH_RECVD_EVENT, + WOW_MAGIC_PKT_RECVD_EVENT, + WOW_GTK_ERR_EVENT, + WOW_FOURWAY_HSHAKE_EVENT, + WOW_EAPOL_RECVD_EVENT, + WOW_NLO_DETECTED_EVENT, + WOW_DISASSOC_RECVD_EVENT, + WOW_PATTERN_MATCH_EVENT, + WOW_CSA_IE_EVENT, + WOW_PROBE_REQ_WPS_IE_EVENT, + WOW_AUTH_REQ_EVENT, + WOW_ASSOC_REQ_EVENT, + WOW_HTT_EVENT, + WOW_RA_MATCH_EVENT, + WOW_HOST_AUTO_SHUTDOWN_EVENT, + WOW_IOAC_MAGIC_EVENT, + WOW_IOAC_SHORT_EVENT, + WOW_IOAC_EXTEND_EVENT, + WOW_IOAC_TIMER_EVENT, + WOW_DFS_PHYERR_RADAR_EVENT, + WOW_BEACON_EVENT, + WOW_CLIENT_KICKOUT_EVENT, + WOW_EVENT_MAX, +}; + +enum wmi_wow_interface_cfg { + WOW_IFACE_PAUSE_ENABLED, + WOW_IFACE_PAUSE_DISABLED +}; + +#define 
C2S(x) case x: return #x + +static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev) +{ + switch (ev) { + C2S(WOW_BMISS_EVENT); + C2S(WOW_BETTER_AP_EVENT); + C2S(WOW_DEAUTH_RECVD_EVENT); + C2S(WOW_MAGIC_PKT_RECVD_EVENT); + C2S(WOW_GTK_ERR_EVENT); + C2S(WOW_FOURWAY_HSHAKE_EVENT); + C2S(WOW_EAPOL_RECVD_EVENT); + C2S(WOW_NLO_DETECTED_EVENT); + C2S(WOW_DISASSOC_RECVD_EVENT); + C2S(WOW_PATTERN_MATCH_EVENT); + C2S(WOW_CSA_IE_EVENT); + C2S(WOW_PROBE_REQ_WPS_IE_EVENT); + C2S(WOW_AUTH_REQ_EVENT); + C2S(WOW_ASSOC_REQ_EVENT); + C2S(WOW_HTT_EVENT); + C2S(WOW_RA_MATCH_EVENT); + C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT); + C2S(WOW_IOAC_MAGIC_EVENT); + C2S(WOW_IOAC_SHORT_EVENT); + C2S(WOW_IOAC_EXTEND_EVENT); + C2S(WOW_IOAC_TIMER_EVENT); + C2S(WOW_DFS_PHYERR_RADAR_EVENT); + C2S(WOW_BEACON_EVENT); + C2S(WOW_CLIENT_KICKOUT_EVENT); + C2S(WOW_EVENT_MAX); + default: + return NULL; + } +} + +enum wmi_wow_wake_reason { + WOW_REASON_UNSPECIFIED = -1, + WOW_REASON_NLOD = 0, + WOW_REASON_AP_ASSOC_LOST, + WOW_REASON_LOW_RSSI, + WOW_REASON_DEAUTH_RECVD, + WOW_REASON_DISASSOC_RECVD, + WOW_REASON_GTK_HS_ERR, + WOW_REASON_EAP_REQ, + WOW_REASON_FOURWAY_HS_RECV, + WOW_REASON_TIMER_INTR_RECV, + WOW_REASON_PATTERN_MATCH_FOUND, + WOW_REASON_RECV_MAGIC_PATTERN, + WOW_REASON_P2P_DISC, + WOW_REASON_WLAN_HB, + WOW_REASON_CSA_EVENT, + WOW_REASON_PROBE_REQ_WPS_IE_RECV, + WOW_REASON_AUTH_REQ_RECV, + WOW_REASON_ASSOC_REQ_RECV, + WOW_REASON_HTT_EVENT, + WOW_REASON_RA_MATCH, + WOW_REASON_HOST_AUTO_SHUTDOWN, + WOW_REASON_IOAC_MAGIC_EVENT, + WOW_REASON_IOAC_SHORT_EVENT, + WOW_REASON_IOAC_EXTEND_EVENT, + WOW_REASON_IOAC_TIMER_EVENT, + WOW_REASON_ROAM_HO, + WOW_REASON_DFS_PHYERR_RADADR_EVENT, + WOW_REASON_BEACON_RECV, + WOW_REASON_CLIENT_KICKOUT_EVENT, + WOW_REASON_PAGE_FAULT = 0x3a, + WOW_REASON_DEBUG_TEST = 0xFF, +}; + +static inline const char *wow_reason(enum wmi_wow_wake_reason reason) +{ + switch (reason) { + C2S(WOW_REASON_UNSPECIFIED); + C2S(WOW_REASON_NLOD); + C2S(WOW_REASON_AP_ASSOC_LOST); + C2S(WOW_REASON_LOW_RSSI); + C2S(WOW_REASON_DEAUTH_RECVD); + C2S(WOW_REASON_DISASSOC_RECVD); + C2S(WOW_REASON_GTK_HS_ERR); + C2S(WOW_REASON_EAP_REQ); + C2S(WOW_REASON_FOURWAY_HS_RECV); + C2S(WOW_REASON_TIMER_INTR_RECV); + C2S(WOW_REASON_PATTERN_MATCH_FOUND); + C2S(WOW_REASON_RECV_MAGIC_PATTERN); + C2S(WOW_REASON_P2P_DISC); + C2S(WOW_REASON_WLAN_HB); + C2S(WOW_REASON_CSA_EVENT); + C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV); + C2S(WOW_REASON_AUTH_REQ_RECV); + C2S(WOW_REASON_ASSOC_REQ_RECV); + C2S(WOW_REASON_HTT_EVENT); + C2S(WOW_REASON_RA_MATCH); + C2S(WOW_REASON_HOST_AUTO_SHUTDOWN); + C2S(WOW_REASON_IOAC_MAGIC_EVENT); + C2S(WOW_REASON_IOAC_SHORT_EVENT); + C2S(WOW_REASON_IOAC_EXTEND_EVENT); + C2S(WOW_REASON_IOAC_TIMER_EVENT); + C2S(WOW_REASON_ROAM_HO); + C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT); + C2S(WOW_REASON_BEACON_RECV); + C2S(WOW_REASON_CLIENT_KICKOUT_EVENT); + C2S(WOW_REASON_PAGE_FAULT); + C2S(WOW_REASON_DEBUG_TEST); + default: + return NULL; + } +} + +#undef C2S + +struct wmi_wow_enable_cmd { + u32 tlv_header; + u32 enable; + u32 pause_iface_config; + u32 flags; +} __packed; + +struct wmi_wow_host_wakeup_ind { + u32 tlv_header; + u32 reserved; +} __packed; + +struct wmi_wow_ev_arg { + u32 vdev_id; + u32 flag; + enum wmi_wow_wake_reason wake_reason; + u32 data_len; +}; + int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, u32 cmd_id); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); @@ -5121,4 +5328,15 @@ int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id, u32 trigger, 
u32 enable); int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, struct ath11k_wmi_vdev_spectral_conf_param *param); +int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id, + struct sk_buff *tmpl); +int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval, + bool unsol_bcast_probe_resp_enabled); +int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id, + struct sk_buff *tmpl); +int ath11k_wmi_set_hw_mode(struct ath11k_base *ab, + enum wmi_host_hw_mode_config_type mode); +int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar); +int ath11k_wmi_wow_enable(struct ath11k *ar); + #endif diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c new file mode 100644 index 000000000000..43c62e99dd0e --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + */ + +#include <linux/delay.h> + +#include "mac.h" +#include "core.h" +#include "hif.h" +#include "debug.h" +#include "wmi.h" +#include "wow.h" + +int ath11k_wow_enable(struct ath11k_base *ab) +{ + struct ath11k *ar = ath11k_ab_to_ar(ab, 0); + int i, ret; + + clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) { + reinit_completion(&ab->htc_suspend); + + ret = ath11k_wmi_wow_enable(ar); + if (ret) { + ath11k_warn(ab, "failed to issue wow enable: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); + if (ret == 0) { + ath11k_warn(ab, + "timed out while waiting for htc suspend completion\n"); + return -ETIMEDOUT; + } + + if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags)) + /* success, suspend complete received */ + return 0; + + ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n", + i); + msleep(ATH11K_WOW_RETRY_WAIT_MS); + } + + ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i); + + return -ETIMEDOUT; +} + +int ath11k_wow_wakeup(struct ath11k_base *ab) +{ + struct ath11k *ar = ath11k_ab_to_ar(ab, 0); + int ret; + + reinit_completion(&ab->wow.wakeup_completed); + + ret = ath11k_wmi_wow_host_wakeup_ind(ar); + if (ret) { + ath11k_warn(ab, "failed to send wow wakeup indication: %d\n", + ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); + if (ret == 0) { + ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n"); + return -ETIMEDOUT; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h new file mode 100644 index 000000000000..dabc4ee63cf6 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/wow.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 
+ */ + +#define ATH11K_WOW_RETRY_NUM 3 +#define ATH11K_WOW_RETRY_WAIT_MS 200 + +int ath11k_wow_enable(struct ath11k_base *ab); +int ath11k_wow_wakeup(struct ath11k_base *ab); diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 5e866a193ed0..8f2719ff463c 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -433,6 +433,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, case NL80211_IFTYPE_STATION: if (ah->assoc) rfilt |= AR5K_RX_FILTER_BEACON; + break; default: break; } diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c index f3906dbe5495..89c7c4e25169 100644 --- a/drivers/net/wireless/ath/ath6kl/testmode.c +++ b/drivers/net/wireless/ath/ath6kl/testmode.c @@ -94,7 +94,6 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev, return 0; - break; case ATH6KL_TM_CMD_RX_REPORT: default: return -EOPNOTSUPP; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index dbc47702a268..b137e7f34397 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -1821,8 +1821,8 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb, /* Only for OPT_TX_CMD, use BE endpoint. */ if (cmd_id == WMI_OPT_TX_FRAME_CMDID) { - ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, - false, false, 0, NULL, if_idx); + ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE, false, + WMI_DATA_HDR_DATA_TYPE_802_3, 0, NULL, if_idx); if (ret) { dev_kfree_skb(skb); return ret; diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 2fa30834a88d..6610d76131fa 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -102,13 +102,8 @@ static void ar5008_write_bank6(struct ath_hw *ah, unsigned int *writecnt) REGWRITE_BUFFER_FLUSH(ah); } -/** +/* * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters - * @rfbuf: - * @reg32: - * @numBits: - * @firstBit: - * @column: * * Performs analog "swizzling" of parameters into their location. * Used on external AR2133/AR5133 radios. @@ -198,10 +193,8 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq) ar5008_write_bank6(ah, ®_writes); } -/** +/* * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios - * @ah: atheros hardware structure - * @chan: * * For the external AR2133/AR5133 radios, takes the MHz channel value and set * the channel value. Assumes writes enabled to analog bus and bank6 register @@ -430,10 +423,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); } -/** +/* * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios - * @ah: atheros hardware structure - * @chan: * * For non single-chip solutions. Converts to baseband spur frequency given the * input channel frequency and compute register settings below. 
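The ar5008_phy.c hunks above drop the kernel-doc "/**" marker from comments whose parameter lines were never filled in, since kernel-doc emits warnings for parameters that are listed but left undocumented. A minimal sketch of the shape one of these comments would need in order to remain kernel-doc; the @chan wording and the Return: line here are illustrative, not taken from the source:

/**
 * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
 * @ah: atheros hardware structure
 * @chan: channel to tune to
 *
 * Takes the MHz channel value and sets the channel value, assuming writes
 * are enabled to the analog bus and bank6 register.
 *
 * Return: 0 on success, a negative errno otherwise.
 */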
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index c07866a2fdf9..16d5c0c5e2a8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -1724,20 +1724,6 @@ static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { {0x00004044, 0x00000000}, }; -static const u32 ar9300PciePhy_clkreq_enable_L1_2p2[][2] = { - /* Addr allmodes */ - {0x00004040, 0x0825365e}, - {0x00004040, 0x0008003b}, - {0x00004044, 0x00000000}, -}; - -static const u32 ar9300PciePhy_clkreq_disable_L1_2p2[][2] = { - /* Addr allmodes */ - {0x00004040, 0x0821365e}, - {0x00004040, 0x0008003b}, - {0x00004044, 0x00000000}, -}; - static const u32 ar9300_2p2_baseband_core_txfir_coeff_japan_2484[][2] = { /* Addr allmodes */ {0x0000a398, 0x00000000}, diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h index 29479afbc4f1..3e783fc13553 100644 --- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h @@ -1010,11 +1010,4 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = { {0x0000a1fc, 0x00000296}, }; -static const u32 ar9331_common_tx_gain_offset1_1[][1] = { - {0x00000000}, - {0x00000003}, - {0x00000000}, - {0x00000000}, -}; - #endif /* INITVALS_9330_1P1_H */ diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h index 2eb163fc1c18..3da4ea564148 100644 --- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h @@ -621,107 +621,6 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = { {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266}, }; -static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = { - /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, - {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae}, - {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e}, - {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52}, - {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84}, - {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000}, - {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9}, - {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, - {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002}, - {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004}, - {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200}, - {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202}, - {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400}, - {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402}, - {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404}, - {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603}, - {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02}, - {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04}, - {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20}, - {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20}, - {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22}, - {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24}, - {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640}, - {0x0000a540, 
0x49005e72, 0x49005e72, 0x38001660, 0x38001660}, - {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861}, - {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81}, - {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83}, - {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84}, - {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3}, - {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5}, - {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9}, - {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec}, - {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000}, - {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002}, - {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004}, - {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200}, - {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202}, - {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400}, - {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402}, - {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404}, - {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603}, - {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02}, - {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04}, - {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20}, - {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20}, - {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22}, - {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24}, - {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640}, - {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660}, - {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861}, - {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81}, - {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83}, - {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84}, - {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3}, - {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5}, - {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9}, - {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb}, - {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec}, - {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, - {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086}, - {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4}, - {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086}, - {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a60c, 0x00000000, 0x00000000, 
0x00000000, 0x00000000}, - {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000}, - {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000}, - {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501}, - {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501}, - {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03}, - {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04}, - {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04}, - {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, - {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, - {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, - {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, - {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005}, - {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52}, - {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84}, - {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000}, - {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000}, -}; - static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352}, diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index bdf6f107f6f1..4afe52c0456e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1006,13 +1006,6 @@ static const u32 ar9485_1_1_soc_preamble[][2] = { {0x00007048, 0x00000002}, }; -static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = { - /* Addr 5G_HT20 5G_HT40 */ - {0x00009e00, 0x03721821, 0x03721821}, - {0x0000a230, 0x0000400b, 0x00004016}, - {0x0000a254, 0x00000898, 0x00001130}, -}; - static const u32 ar9485_1_1_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index e06b74a54a69..13b4f5f50f8a 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -661,7 +661,6 @@ struct ath9k_vif_iter_data { int naps; /* number of AP vifs */ int nmeshes; /* number of mesh vifs */ int nstations; /* number of station vifs */ - int nwds; /* number of WDS vifs */ int nadhocs; /* number of adhoc vifs */ int nocbs; /* number of OCB vifs */ int nbcnvifs; /* number of beaconing vifs */ diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c index 53ca4b063eb9..7aefb79f6bed 100644 --- a/drivers/net/wireless/ath/ath9k/common-debug.c +++ b/drivers/net/wireless/ath/ath9k/common-debug.c @@ -189,7 +189,7 @@ static ssize_t read_file_phy_err(struct file *file, char __user *user_buf, { #define PHY_ERR(s, p) \ len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \ - rxstats->phy_err_stats[p]); + rxstats->phy_err_stats[p]) struct ath_rx_stats *rxstats = file->private_data; char *buf; diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 26ea51a72156..017a43bc400c 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -735,10 +735,10 @@ static int read_file_misc(struct seq_file *file, void *data) ath9k_calculate_iter_data(sc, ctx, &iter_data); seq_printf(file, - "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i", + "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i", i++, 
(int)(ctx->assigned), iter_data.naps, iter_data.nstations, - iter_data.nmeshes, iter_data.nwds); + iter_data.nmeshes); seq_printf(file, " ADHOC: %i OCB: %i TOTAL: %hi BEACON-VIF: %hi\n", iter_data.nadhocs, iter_data.nocbs, sc->cur_chan->nvifs, sc->nbcnvifs); diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c index 3251c9abe270..2a79c2fa8415 100644 --- a/drivers/net/wireless/ath/ath9k/dfs_debug.c +++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c @@ -26,7 +26,7 @@ static struct ath_dfs_pool_stats dfs_pool_stats = { 0 }; #define ATH9K_DFS_STAT(s, p) \ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \ - sc->debug.stats.dfs_stats.p); + sc->debug.stats.dfs_stats.p) #define ATH9K_DFS_POOL_STAT(s, p) \ len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \ dfs_pool_stats.p); diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index fbeb4a739d32..321ff54fdb42 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -44,9 +44,8 @@ static u32 ath_dynack_get_max_to(struct ath_hw *ah) return 600; } -/** +/* * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation - * */ static inline int ath_dynack_ewma(int old, int new) { @@ -247,8 +246,12 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, ridx = ts->ts_rateindex; da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp; - ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1); - ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2); + + /* ether_addr_copy() gives a false warning on gcc-10 so use memcpy() + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97490 + */ + memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1, ETH_ALEN); + memcpy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2, ETH_ALEN); if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) { const struct ieee80211_rate *rate; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 0bdc4dcb7b8f..8e69e8989f6d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -297,7 +297,12 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv, tx_hdr.data_type = ATH9K_HTC_NORMAL; } - if (ieee80211_is_data_qos(hdr->frame_control)) { + /* Transmit all frames that should not be reordered relative + * to each other using the same priority. For other QoS data + * frames extract the priority from the header. 
+ */ + if (!(tx_info->control.flags & IEEE80211_TX_CTRL_DONT_REORDER) && + ieee80211_is_data_qos(hdr->frame_control)) { qc = ieee80211_get_qos_ctl(hdr); tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 6609ce122e6e..b66eeb577272 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2308,7 +2308,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) ath_dbg(ath9k_hw_common(ah), BEACON, "%s: unsupported opmode: %d\n", __func__, ah->opmode); return; - break; } REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 690fe3a1b516..42a208787f5a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -832,12 +832,6 @@ static const struct ieee80211_iface_limit if_limits[] = { BIT(NL80211_IFTYPE_P2P_GO) }, }; -#ifdef CONFIG_WIRELESS_WDS -static const struct ieee80211_iface_limit wds_limits[] = { - { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) }, -}; -#endif - #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT static const struct ieee80211_iface_limit if_limits_multi[] = { @@ -874,15 +868,6 @@ static const struct ieee80211_iface_combination if_comb[] = { BIT(NL80211_CHAN_WIDTH_40), #endif }, -#ifdef CONFIG_WIRELESS_WDS - { - .limits = wds_limits, - .n_limits = ARRAY_SIZE(wds_limits), - .max_interfaces = 2048, - .num_different_channels = 1, - .beacon_int_infra_match = true, - }, -#endif }; #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT @@ -897,7 +882,6 @@ static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw) ieee80211_hw_set(hw, QUEUE_CONTROL); hw->queues = ATH9K_NUM_TX_QUEUES; hw->offchannel_tx_hw_queue = hw->queues - 1; - hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS); hw->wiphy->iface_combinations = if_comb_multi; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi); hw->wiphy->max_scan_ssids = 255; @@ -953,9 +937,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT) | -#ifdef CONFIG_WIRELESS_WDS - BIT(NL80211_IFTYPE_WDS) | -#endif BIT(NL80211_IFTYPE_OCB); if (ath9k_is_chanctx_enabled()) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 8dbf68b94228..caebe3fd6869 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -973,9 +973,6 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, if (vif->bss_conf.enable_beacon) ath9k_vif_iter_set_beacon(iter_data, vif); break; - case NL80211_IFTYPE_WDS: - iter_data->nwds++; - break; default: break; } @@ -1136,8 +1133,6 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ah->opmode = NL80211_IFTYPE_MESH_POINT; else if (iter_data.nocbs) ah->opmode = NL80211_IFTYPE_OCB; - else if (iter_data.nwds) - ah->opmode = NL80211_IFTYPE_AP; else if (iter_data.nadhocs) ah->opmode = NL80211_IFTYPE_ADHOC; else diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c index 19009aafc4e1..bb40889d7c72 100644 --- a/drivers/net/wireless/ath/carl9170/debug.c +++ b/drivers/net/wireless/ath/carl9170/debug.c @@ -45,7 +45,7 @@ #include "cmd.h" #define ADD(buf, off, max, fmt, args...) 
\ - off += scnprintf(&buf[off], max - off, fmt, ##args); + off += scnprintf(&buf[off], max - off, fmt, ##args) struct carl9170_debugfs_fops { @@ -818,7 +818,7 @@ void carl9170_debugfs_register(struct ar9170 *ar) #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, carl_debugfs_##name ##_ops.attr, \ ar->debug_dir, ar, \ - &carl_debugfs_##name ## _ops.fops); + &carl_debugfs_##name ## _ops.fops) DEBUGFS_ADD(usb_tx_anch_urbs); DEBUGFS_ADD(usb_rx_pool_urbs); diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c index b2eeb9fd68d2..6cdbee5beb07 100644 --- a/drivers/net/wireless/ath/carl9170/mac.c +++ b/drivers/net/wireless/ath/carl9170/mac.c @@ -329,10 +329,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar) /* iwlagn 802.11n STA Workaround */ rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST; break; - case NL80211_IFTYPE_WDS: - cam_mode |= AR9170_MAC_CAM_AP_WDS; - rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST; - break; case NL80211_IFTYPE_STATION: cam_mode |= AR9170_MAC_CAM_STA; rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST; diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index dbef9d8fc893..cca3b086aa70 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -646,7 +646,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: if ((vif->type == NL80211_IFTYPE_STATION) || - (vif->type == NL80211_IFTYPE_WDS) || (vif->type == NL80211_IFTYPE_AP) || (vif->type == NL80211_IFTYPE_MESH_POINT)) break; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 235cf77cd60c..6b8446ff48c8 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -840,6 +840,7 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar, case CARL9170_ERP_RTS: if (likely(!multi)) return true; + break; default: break; diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 0813473793df..80390495ea25 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -23,7 +23,7 @@ /** * struct radar_types - contains array of patterns defined for one DFS domain - * @domain: DFS regulatory domain + * @region: regulatory DFS region * @num_radar_types: number of radar types to follow * @radar_types: radar types array */ @@ -133,8 +133,9 @@ static const struct radar_types *dfs_domains[] = { /** * get_dfs_domain_radar_types() - get radar types for a given DFS domain - * @param domain DFS domain - * @return radar_types ptr on success, NULL if DFS domain is not supported + * @region: regulatory DFS region + * + * Return value: radar_types ptr on success, NULL if DFS domain is not supported */ static const struct radar_types * get_dfs_domain_radar_types(enum nl80211_dfs_regions region) @@ -227,9 +228,10 @@ fail: /** * channel_detector_get() - get channel detector for given frequency - * @param dpd instance pointer - * @param freq frequency in MHz - * @return pointer to channel detector on success, NULL otherwise + * @dpd: DPD instance pointer + * @freq: freq frequency in MHz + * + * Return value: pointer to channel detector on success, NULL otherwise * * Return existing channel detector for the given frequency or return a * newly create one. 
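A side note on the scnprintf macro hunks above (ath9k common-debug.c and dfs_debug.c, carl9170 debug.c): they drop the trailing semicolon from statement-like macros so that the semicolon is written at the call site instead. The toy program below — generic C, not taken from any of these drivers — shows the failure mode a baked-in semicolon causes.

/* Toy illustration of why statement-like macros must not end in ';'. */
#include <stdio.h>

#define LOG_BAD(msg)	printf("%s\n", msg);	/* ';' baked in: broken */
#define LOG_GOOD(msg)	printf("%s\n", msg)	/* caller supplies ';' */

int main(void)
{
	int err = 0;

	/* "if (err) LOG_BAD("x"); else ..." would expand to
	 *     if (err) printf(...);; else ...
	 * where the second ';' is an empty statement that detaches the
	 * else from its if — a compile error. LOG_GOOD has no such trap:
	 */
	if (err)
		LOG_GOOD("failed");
	else
		LOG_GOOD("ok");

	return 0;
}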
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c index 05b0464c6b92..d07c454c9c00 100644 --- a/drivers/net/wireless/ath/dfs_pri_detector.c +++ b/drivers/net/wireless/ath/dfs_pri_detector.c @@ -29,18 +29,17 @@ struct ath_dfs_pool_stats global_dfs_pool_stats = {}; (MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \ MIN + PRI_TOLERANCE : RUNTIME) -/** +/* * struct pulse_elem - elements in pulse queue - * @ts: time stamp in usecs */ struct pulse_elem { struct list_head head; u64 ts; }; -/** +/* * pde_get_multiple() - get number of multiples considering a given tolerance - * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise + * Return value: factor if abs(val - factor*fraction) <= tolerance, 0 otherwise */ static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance) { @@ -70,7 +69,7 @@ static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance) return factor; } -/** +/* * DOC: Singleton Pulse and Sequence Pools * * Instances of pri_sequence and pulse_elem are kept in singleton pools to diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index bee9110b91f3..b2400e2417a5 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -360,6 +360,7 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy, /** * ath_reg_apply_ir_flags() * @wiphy: the wiphy to use + * @reg: regulatory structure - used for country selection * @initiator: the regulatory hint initiator * * If no country IE has been received always enable passive scan diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 706728fba72d..5867bd9c2f64 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -910,6 +910,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, * place where AID is available. 
*/ wcn36xx_smd_config_sta(wcn, vif, sta); + wcn36xx_enable_keep_alive_null_packet(wcn, vif); } else { wcn36xx_dbg(WCN36XX_DBG_MAC, "disassociated bss %pM vif %pM AID=%d\n", @@ -1246,6 +1247,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL); ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR); wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 766400f7b61c..5445277dd8de 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -78,6 +78,7 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = { WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10), WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0), WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */ + WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000), }; static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = { @@ -162,7 +163,7 @@ static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = { WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0), WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000), WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000), - WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 200), + WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000), WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0), WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0), WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0), @@ -2175,6 +2176,7 @@ int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif) INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ); msg_body.bss_index = vif_priv->bss_index; + msg_body.send_data_null = 1; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2568,7 +2570,7 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_SCAN_IND_FAILED: case WCN36XX_HAL_SCAN_IND_DEQUEUED: scan_info.aborted = true; - /* fall through */ + fallthrough; case WCN36XX_HAL_SCAN_IND_COMPLETED: mutex_lock(&wcn->scan_lock); wcn->scan_req = NULL; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 421aebbb49e5..8699f8279a8b 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -262,7 +262,7 @@ struct fw_map *wil_find_fw_mapping(const char *section) /** * Check address validity for WMI buffer; remap if needed * @wil: driver data - * @ptr: internal (linker) fw/ucode address + * @ptr_: internal (linker) fw/ucode address * @size: if non zero, validate the block does not * exceed the device memory (bar) * diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index f175dbaffc30..150a366e8f62 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -4961,12 +4961,11 @@ static int b43_op_add_interface(struct ieee80211_hw *hw, struct b43_wldev *dev; int err = -EOPNOTSUPP; - /* TODO: allow WDS/AP devices to coexist */ + /* TODO: allow AP devices to coexist */ if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MESH_POINT && vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_WDS && vif->type != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; @@ -5576,9 +5575,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev) BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_STATION) | -#ifdef CONFIG_WIRELESS_WDS - BIT(NL80211_IFTYPE_WDS) | -#endif 
BIT(NL80211_IFTYPE_ADHOC); hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index a27125b7922c..7692a2618c97 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3381,11 +3381,10 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw, unsigned long flags; int err = -EOPNOTSUPP; - /* TODO: allow WDS/AP devices to coexist */ + /* TODO: allow AP devices to coexist */ if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_WDS && vif->type != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; @@ -3805,9 +3804,6 @@ static int b43legacy_wireless_init(struct ssb_device *dev) hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | -#ifdef CONFIG_WIRELESS_WDS - BIT(NL80211_IFTYPE_WDS) | -#endif BIT(NL80211_IFTYPE_ADHOC); hw->queues = 1; /* FIXME: hardware has more queues */ hw->max_rates = 2; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index f9ebb98b0e3c..ce8c102df7b3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -367,7 +367,7 @@ static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr, * @func: SDIO function * @write: direction flag * @addr: dongle memory address as source/destination - * @pkt: skb pointer + * @pktlist: skb buffer head pointer * * This function takes the respbonsibility as the interface function to MMC * stack for block data access. It assumes that the skb passed down by the diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index a2dbbb977d0c..0ee421f30aa2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2137,7 +2137,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, BRCMF_WSEC_MAX_PSK_LEN); else if (profile->use_fwsup == BRCMF_PROFILE_FWSUP_SAE) { /* clean up user-space RSNE */ - if (brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) { + err = brcmf_fil_iovar_data_set(ifp, "wpaie", NULL, 0); + if (err) { bphy_err(drvr, "failed to clean up user-space RSNE\n"); goto done; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 430d2cca98b3..bc3f4e4edcdf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -384,6 +384,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp) * @drvr: driver information object. * @event_packet: event packet to process. * @packet_len: length of the packet + * @gfp: memory allocation flags. * * If the packet buffer contains a firmware event message it will * dispatch the event to a registered handler (using worker). 
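The brcmf_sdio hunks just below replace raw u32 iovar buffers with __le32 plus explicit cpu_to_le32()/le32_to_cpu() conversions, because the dongle expects little-endian values regardless of host byte order. Here is a minimal sketch of that boundary-conversion pattern; send_iovar() is a hypothetical stub standing in for brcmf_iovar_data_set(), and the 0xff mask is illustrative only.

/* Hedged sketch of fixed-endianness device I/O; send_iovar() is a
 * hypothetical stand-in for brcmf_iovar_data_set().
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static int send_iovar(const char *name, const void *buf, size_t len)
{
	/* Stub: a real driver would marshal this to the firmware. */
	(void)name; (void)buf; (void)len;
	return 0;
}

static int set_txglomalign(u32 align)
{
	/* Convert once at the host/device boundary instead of handing
	 * the firmware a host-endian u32 (wrong on big-endian hosts).
	 */
	__le32 iovar = cpu_to_le32(align);

	return send_iovar("bus:txglomalign", &iovar, sizeof(iovar));
}

static u8 parse_doffset(const u8 *swheader)
{
	/* Reading a wire header converts from LE before masking, as in
	 * the fixed brcmf_sdio_getdatoffset(); the cast assumes the
	 * header is 32-bit aligned, as the driver itself does.
	 */
	u32 hdr = le32_to_cpu(*(const __le32 *)swheader);

	return (u8)(hdr & 0xff);	/* illustrative mask only */
}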
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 437e83ea8902..19b0f318f93e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -746,7 +746,7 @@ brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea) static struct brcmf_fws_mac_descriptor* brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da) { - struct brcmf_fws_mac_descriptor *entry = &fws->desc.other; + struct brcmf_fws_mac_descriptor *entry; bool multicast; multicast = is_multicast_ether_addr(da); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 39381cbde89e..45bc502fcb34 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -759,6 +759,7 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo) /** * brcmf_pcie_bus_console_read - reads firmware messages * + * @devinfo: pointer to the device data structure * @error: specifies if error has occurred (prints messages unconditionally) */ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo, @@ -1936,16 +1937,18 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) fwreq = brcmf_pcie_prepare_fw_request(devinfo); if (!fwreq) { ret = -ENOMEM; - goto fail_bus; + goto fail_brcmf; } ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup); if (ret < 0) { kfree(fwreq); - goto fail_bus; + goto fail_brcmf; } return 0; +fail_brcmf: + brcmf_free(&devinfo->pdev->dev); fail_bus: kfree(bus->msgbuf); kfree(bus); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 99987a789e7e..16ed325795a8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -625,6 +625,10 @@ BRCMF_FW_DEF(4359, "brcmfmac4359-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); BRCMF_FW_DEF(43012, "brcmfmac43012-sdio"); +/* firmware config files */ +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt"); +MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt"); + static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), @@ -1340,7 +1344,7 @@ static void brcmf_sdio_free_glom(struct brcmf_sdio *bus) static inline u8 brcmf_sdio_getdatoffset(u8 *swheader) { u32 hdrvalue; - hdrvalue = *(u32 *)swheader; + hdrvalue = le32_to_cpu(*(__le32 *)swheader); return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT); } @@ -1349,7 +1353,7 @@ static inline bool brcmf_sdio_fromevntchan(u8 *swheader) u32 hdrvalue; u8 ret; - hdrvalue = *(u32 *)swheader; + hdrvalue = le32_to_cpu(*(__le32 *)swheader); ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT); return (ret == SDPCM_EVENT_CHANNEL); @@ -3517,6 +3521,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) struct brcmf_sdio *bus = sdiodev->bus; struct brcmf_core *core = bus->sdio_core; u32 value; + __le32 iovar; int err; /* maxctl provided by common layer */ @@ -3537,16 +3542,16 @@ static int brcmf_sdio_bus_preinit(struct device *dev) */ if (core->rev < 12) { /* for sdio core rev < 12, disable txgloming */ - value = 0; - err = 
brcmf_iovar_data_set(dev, "bus:txglom", &value, - sizeof(u32)); + iovar = 0; + err = brcmf_iovar_data_set(dev, "bus:txglom", &iovar, + sizeof(iovar)); } else { /* otherwise, set txglomalign */ value = sdiodev->settings->bus.sdio.sd_sgentry_align; /* SDIO ADMA requires at least 32 bit alignment */ - value = max_t(u32, value, ALIGNMENT); - err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, - sizeof(u32)); + iovar = cpu_to_le32(max_t(u32, value, ALIGNMENT)); + err = brcmf_iovar_data_set(dev, "bus:txglomalign", &iovar, + sizeof(iovar)); } if (err < 0) @@ -3555,9 +3560,9 @@ static int brcmf_sdio_bus_preinit(struct device *dev) bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; if (sdiodev->sg_support) { bus->txglom = false; - value = 1; + iovar = cpu_to_le32(1); err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", - &value, sizeof(u32)); + &iovar, sizeof(iovar)); if (err < 0) { /* bus:rxglom is allowed to fail */ err = 0; @@ -4541,6 +4546,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_sdiod_intr_unregister(bus->sdiodev); brcmf_detach(bus->sdiodev->dev); + brcmf_free(bus->sdiodev->dev); cancel_work_sync(&bus->datawork); if (bus->brcmf_wq) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index c9fb4b0cffaf..2631eb7569eb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c @@ -942,14 +942,19 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, index = TX_SEQ_TO_INDEX(seq); ack_recd = false; if (ba_recd) { + int block_acked; + bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX); + if (bindex < AMPDU_TX_BA_MAX_WSIZE) + block_acked = isset(bitmap, bindex); + else + block_acked = 0; brcms_dbg_ht(wlc->hw->d11core, "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n", tid, seq, start_seq, bindex, - isset(bitmap, bindex), index); + block_acked, index); /* if acked then clear bit and free packet */ - if ((bindex < AMPDU_TX_BA_MAX_WSIZE) - && isset(bitmap, bindex)) { + if (block_acked) { ini->txretry[index] = 0; /* diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 87b9398b03fd..e35e1380ae43 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1115,7 +1115,8 @@ static int enable_MAC(struct airo_info *ai, int lock); static void disable_MAC(struct airo_info *ai, int lock); static void enable_interrupts(struct airo_info*); static void disable_interrupts(struct airo_info*); -static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp); +static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp, + bool may_sleep); static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap); static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen, int whichbap); @@ -1130,8 +1131,10 @@ static int PC4500_writerid(struct airo_info*, u16 rid, const void static int do_writerid(struct airo_info*, u16 rid, const void *rid_data, int len, int dummy); static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw); -static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket); -static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket); +static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket, + bool may_sleep); +static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket, + bool may_sleep); static int mpi_send_packet(struct 
net_device *dev); static void mpi_unmap_card(struct pci_dev *pci); @@ -1144,7 +1147,6 @@ static int airo_thread(void *data); static void timer_func(struct net_device *dev); static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev); -static void airo_read_wireless_stats(struct airo_info *local); #ifdef CISCO_EXT static int readrids(struct net_device *dev, aironet_ioctl *comp); static int writerids(struct net_device *dev, aironet_ioctl *comp); @@ -1200,7 +1202,6 @@ struct airo_info { #define JOB_MIC 5 #define JOB_EVENT 6 #define JOB_AUTOWEP 7 -#define JOB_WSTATS 8 #define JOB_SCAN_RESULTS 9 unsigned long jobs; int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen, @@ -1755,7 +1756,7 @@ static int readBSSListRid(struct airo_info *ai, int first, if (down_interruptible(&ai->sem)) return -ERESTARTSYS; ai->list_bss_task = current; - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, true); up(&ai->sem); /* Let the command take effect */ schedule_timeout_uninterruptible(3 * HZ); @@ -2098,7 +2099,7 @@ static void get_tx_error(struct airo_info *ai, s32 fid) } } -static void airo_end_xmit(struct net_device *dev) +static void airo_end_xmit(struct net_device *dev, bool may_sleep) { u16 status; int i; @@ -2109,7 +2110,7 @@ static void airo_end_xmit(struct net_device *dev) clear_bit(JOB_XMIT, &priv->jobs); clear_bit(FLAG_PENDING_XMIT, &priv->flags); - status = transmit_802_3_packet (priv, fids[fid], skb->data); + status = transmit_802_3_packet(priv, fids[fid], skb->data, may_sleep); up(&priv->sem); i = 0; @@ -2166,11 +2167,11 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb, set_bit(JOB_XMIT, &priv->jobs); wake_up_interruptible(&priv->thr_wait); } else - airo_end_xmit(dev); + airo_end_xmit(dev, false); return NETDEV_TX_OK; } -static void airo_end_xmit11(struct net_device *dev) +static void airo_end_xmit11(struct net_device *dev, bool may_sleep) { u16 status; int i; @@ -2181,7 +2182,7 @@ static void airo_end_xmit11(struct net_device *dev) clear_bit(JOB_XMIT11, &priv->jobs); clear_bit(FLAG_PENDING_XMIT11, &priv->flags); - status = transmit_802_11_packet (priv, fids[fid], skb->data); + status = transmit_802_11_packet(priv, fids[fid], skb->data, may_sleep); up(&priv->sem); i = MAX_FIDS / 2; @@ -2245,7 +2246,7 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb, set_bit(JOB_XMIT11, &priv->jobs); wake_up_interruptible(&priv->thr_wait); } else - airo_end_xmit11(dev); + airo_end_xmit11(dev, false); return NETDEV_TX_OK; } @@ -2288,18 +2289,14 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev) struct airo_info *local = dev->ml_priv; if (!test_bit(JOB_STATS, &local->jobs)) { - /* Get stats out of the card if available */ - if (down_trylock(&local->sem) != 0) { - set_bit(JOB_STATS, &local->jobs); - wake_up_interruptible(&local->thr_wait); - } else - airo_read_stats(dev); + set_bit(JOB_STATS, &local->jobs); + wake_up_interruptible(&local->thr_wait); } return &dev->stats; } -static void airo_set_promisc(struct airo_info *ai) +static void airo_set_promisc(struct airo_info *ai, bool may_sleep) { Cmd cmd; Resp rsp; @@ -2308,7 +2305,7 @@ static void airo_set_promisc(struct airo_info *ai) cmd.cmd = CMD_SETMODE; clear_bit(JOB_PROMISC, &ai->jobs); cmd.parm0=(ai->flags&IFF_PROMISC) ? 
PROMISC : NOPROMISC; - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, may_sleep); up(&ai->sem); } @@ -2322,7 +2319,7 @@ static void airo_set_multicast_list(struct net_device *dev) set_bit(JOB_PROMISC, &ai->jobs); wake_up_interruptible(&ai->thr_wait); } else - airo_set_promisc(ai); + airo_set_promisc(ai, false); } if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) { @@ -2482,7 +2479,7 @@ static int mpi_init_descriptors (struct airo_info *ai) cmd.parm0 = FID_RX; cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux); cmd.parm2 = MPI_MAX_FIDS; - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate RX FID"); return rc; @@ -2510,7 +2507,7 @@ static int mpi_init_descriptors (struct airo_info *ai) } ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */ - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate TX FID"); return rc; @@ -2524,7 +2521,7 @@ static int mpi_init_descriptors (struct airo_info *ai) cmd.parm0 = RID_RW; cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux); cmd.parm2 = 1; /* Magic number... */ - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if (rc != SUCCESS) { airo_print_err(ai->dev->name, "Couldn't allocate RID"); return rc; @@ -3150,15 +3147,13 @@ static int airo_thread(void *data) } if (test_bit(JOB_XMIT, &ai->jobs)) - airo_end_xmit(dev); + airo_end_xmit(dev, true); else if (test_bit(JOB_XMIT11, &ai->jobs)) - airo_end_xmit11(dev); + airo_end_xmit11(dev, true); else if (test_bit(JOB_STATS, &ai->jobs)) airo_read_stats(dev); - else if (test_bit(JOB_WSTATS, &ai->jobs)) - airo_read_wireless_stats(ai); else if (test_bit(JOB_PROMISC, &ai->jobs)) - airo_set_promisc(ai); + airo_set_promisc(ai, true); else if (test_bit(JOB_MIC, &ai->jobs)) micinit(ai); else if (test_bit(JOB_EVENT, &ai->jobs)) @@ -3281,11 +3276,9 @@ static void airo_handle_link(struct airo_info *ai) set_bit(FLAG_UPDATE_UNI, &ai->flags); set_bit(FLAG_UPDATE_MULTI, &ai->flags); - if (down_trylock(&ai->sem) != 0) { - set_bit(JOB_EVENT, &ai->jobs); - wake_up_interruptible(&ai->thr_wait); - } else - airo_send_event(ai->dev); + set_bit(JOB_EVENT, &ai->jobs); + wake_up_interruptible(&ai->thr_wait); + netif_carrier_on(ai->dev); } else if (!scan_forceloss) { if (auto_wep && !ai->expires) { @@ -3609,7 +3602,7 @@ static int enable_MAC(struct airo_info *ai, int lock) if (!test_bit(FLAG_ENABLED, &ai->flags)) { memset(&cmd, 0, sizeof(cmd)); cmd.cmd = MAC_ENABLE; - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if (rc == SUCCESS) set_bit(FLAG_ENABLED, &ai->flags); } else @@ -3641,7 +3634,7 @@ static void disable_MAC(struct airo_info *ai, int lock) netif_carrier_off(ai->dev); memset(&cmd, 0, sizeof(cmd)); cmd.cmd = MAC_DISABLE; // disable in case already enabled - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, true); clear_bit(FLAG_ENABLED, &ai->flags); } if (lock == 1) @@ -3844,7 +3837,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) cmd.parm0 = cmd.parm1 = cmd.parm2 = 0; if (lock && down_interruptible(&ai->sem)) return ERROR; - if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { + if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) { if (lock) up(&ai->sem); return ERROR; @@ -3854,7 +3847,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) // Let's figure out if we need to use the AUX port if 
(!test_bit(FLAG_MPI,&ai->flags)) { cmd.cmd = CMD_ENABLEAUX; - if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { + if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) { if (lock) up(&ai->sem); airo_print_err(ai->dev->name, "Error checking for AUX port"); @@ -3966,7 +3959,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock) return SUCCESS; } -static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) +static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp, + bool may_sleep) { // Im really paranoid about letting it run forever! int max_tries = 600000; @@ -3983,8 +3977,8 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) if ((IN4500(ai, COMMAND)) == pCmd->cmd) // PC4500 didn't notice command, try again OUT4500(ai, COMMAND, pCmd->cmd); - if (!in_atomic() && (max_tries & 255) == 0) - schedule(); + if (may_sleep && (max_tries & 255) == 0) + cond_resched(); } if (max_tries == -1) { @@ -4141,7 +4135,7 @@ static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd) memset(&cmd, 0, sizeof(cmd)); cmd.cmd = accmd; cmd.parm0 = rid; - status = issuecommand(ai, &cmd, &rsp); + status = issuecommand(ai, &cmd, &rsp, true); if (status != 0) return status; if ((rsp.status & 0x7F00) != 0) { return (accmd << 8) + (rsp.rsp0 & 0xFF); @@ -4177,7 +4171,7 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in memcpy_toio(ai->config_desc.card_ram_off, &ai->config_desc.rid_desc, sizeof(Rid)); - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if (rsp.status & 0x7f00) rc = rsp.rsp0; @@ -4256,7 +4250,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid, memcpy(ai->config_desc.virtual_host_addr, pBuf, len); - rc = issuecommand(ai, &cmd, &rsp); + rc = issuecommand(ai, &cmd, &rsp, true); if ((rc & 0xff00) != 0) { airo_print_err(ai->dev->name, "%s: Write rid Error %d", __func__, rc); @@ -4302,7 +4296,7 @@ static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw) cmd.parm0 = lenPayload; if (down_interruptible(&ai->sem)) return ERROR; - if (issuecommand(ai, &cmd, &rsp) != SUCCESS) { + if (issuecommand(ai, &cmd, &rsp, true) != SUCCESS) { txFid = ERROR; goto done; } @@ -4348,7 +4342,8 @@ done: /* In general BAP1 is dedicated to transmiting packets. However, since we need a BAP when accessing RIDs, we also use BAP1 for that. Make sure the BAP1 spinlock is held when this is called. 
*/ -static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) +static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket, + bool may_sleep) { __le16 payloadLen; Cmd cmd; @@ -4386,12 +4381,14 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket) memset(&cmd, 0, sizeof(cmd)); cmd.cmd = CMD_TRANSMIT; cmd.parm0 = txFid; - if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR; + if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS) + return ERROR; if ((rsp.status & 0xFF00) != 0) return ERROR; return SUCCESS; } -static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket) +static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket, + bool may_sleep) { __le16 fc, payloadLen; Cmd cmd; @@ -4426,7 +4423,8 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket) memset(&cmd, 0, sizeof(cmd)); cmd.cmd = CMD_TRANSMIT; cmd.parm0 = txFid; - if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR; + if (issuecommand(ai, &cmd, &rsp, may_sleep) != SUCCESS) + return ERROR; if ((rsp.status & 0xFF00) != 0) return ERROR; return SUCCESS; } @@ -5490,7 +5488,7 @@ static int proc_BSSList_open(struct inode *inode, struct file *file) kfree(file->private_data); return -ERESTARTSYS; } - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, true); up(&ai->sem); data->readlen = 0; return 0; @@ -5627,7 +5625,7 @@ static int __maybe_unused airo_pci_suspend(struct device *dev_d) netif_device_detach(dev); ai->power = PMSG_SUSPEND; cmd.cmd = HOSTSLEEP; - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, true); device_wakeup_enable(dev_d); return 0; @@ -5787,7 +5785,7 @@ static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid) } #define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0) -#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50); +#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 
0x10 : 0x50) /*------------------------------------------------------------------*/ /* @@ -5970,7 +5968,7 @@ static int airo_set_wap(struct net_device *dev, cmd.cmd = CMD_LOSE_SYNC; if (down_interruptible(&local->sem)) return -ERESTARTSYS; - issuecommand(local, &cmd, &rsp); + issuecommand(local, &cmd, &rsp, true); up(&local->sem); } else { memset(APList_rid, 0, sizeof(*APList_rid)); @@ -7067,6 +7065,7 @@ static int airo_set_power(struct net_device *dev, local->config.rmode &= ~RXMODE_MASK; local->config.rmode |= RXMODE_BC_MC_ADDR; set_bit (FLAG_COMMIT, &local->flags); + break; case IW_POWER_ON: /* This is broken, fixme ;-) */ break; @@ -7268,7 +7267,7 @@ static int airo_set_scan(struct net_device *dev, ai->scan_timeout = RUN_AT(3*HZ); memset(&cmd, 0, sizeof(cmd)); cmd.cmd = CMD_LISTBSS; - issuecommand(ai, &cmd, &rsp); + issuecommand(ai, &cmd, &rsp, true); wake = 1; out: @@ -7535,7 +7534,7 @@ static int airo_config_commit(struct net_device *dev, writeConfigRid(local, 0); enable_MAC(local, 0); if (test_bit (FLAG_RESET, &local->flags)) - airo_set_promisc(local); + airo_set_promisc(local, true); else up(&local->sem); @@ -7732,15 +7731,12 @@ static void airo_read_wireless_stats(struct airo_info *local) __le32 *vals = stats_rid.vals; /* Get stats out of the card */ - clear_bit(JOB_WSTATS, &local->jobs); - if (local->power.event) { - up(&local->sem); + if (local->power.event) return; - } + readCapabilityRid(local, &cap_rid, 0); readStatusRid(local, &status_rid, 0); readStatsRid(local, &stats_rid, RID_STATS, 0); - up(&local->sem); /* The status */ local->wstats.status = le16_to_cpu(status_rid.mode); @@ -7783,15 +7779,10 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) { struct airo_info *local = dev->ml_priv; - if (!test_bit(JOB_WSTATS, &local->jobs)) { - /* Get stats out of the card if available */ - if (down_trylock(&local->sem) != 0) { - set_bit(JOB_WSTATS, &local->jobs); - wake_up_interruptible(&local->thr_wait); - } else - airo_read_wireless_stats(local); + if (!down_interruptible(&local->sem)) { + airo_read_wireless_stats(local); + up(&local->sem); } - return &local->wstats; } diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index d2bbe6a73514..7220fc8fd9b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -1,56 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - + */ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" @@ -92,6 +44,7 @@ #define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-" #define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-" #define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-" +#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-" #define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \ IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" @@ -127,6 +80,8 @@ IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode" #define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode" +#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \ + IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -155,7 +110,8 @@ static const struct iwl_base_params iwl_ax210_base_params = { static const struct iwl_ht_params iwl_22000_ht_params = { .stbc = true, .ldpc = true, - .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | + BIT(NL80211_BAND_6GHZ), }; #define IWL_DEVICE_22000_COMMON \ @@ -647,6 +603,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = { .num_rbds = IWL_NUM_RBDS_AX210_HE, }; +const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = { + .fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE, + .uhb_supported = true, + IWL_DEVICE_AX210, + .num_rbds = IWL_NUM_RBDS_AX210_HE, +}; + MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); @@ -664,3 +627,4 @@ MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index b72993e07fc2..c542140e534e 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH + */ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 280d84fa5cb1..4ff8a56414a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2014, 2018-2019 Intel Corporation + * Copyright (C) 2014-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016 Intel Deutschland GmbH + */ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index be4acf4a0e32..eb5db204d84b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -1,56 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 254a5ce52456..1276df1c7a55 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ #ifndef __iwl_agn_h__ #define __iwl_agn_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c index 974e1c324ca7..a11884fa254b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
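The remaining hunks in this series repeat the same mechanical conversion across the iwlwifi tree: roughly sixty lines of dual BSD/GPLv2 boilerplate per file collapse into a single SPDX tag plus consolidated copyright lines. Note the two comment forms, which follow the kernel's license-rules convention: .c files carry the tag as a C99 comment on line 1, while headers use a block comment (headers can be pulled into contexts where // comments are not accepted). A hypothetical file pair, purely for illustration:

        /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
        /* example.h - hypothetical header: block-comment form */
        #ifndef EXAMPLE_H
        #define EXAMPLE_H
        int example_answer(void);
        #endif /* EXAMPLE_H */

        // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
        // example.c - hypothetical source file: C99 comment form
        #include "example.h"
        int example_answer(void) { return 42; }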
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014 Intel Corporation + */ #include <linux/slab.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h index c43ba94bfa8b..a89b5bad75ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h @@ -1,59 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ #ifndef __iwl_calib_h__ #define __iwl_calib_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index fdcc1292a92b..235c7a2e3483 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ /* * Please use this file (commands.h) only for uCode API definitions. * Please use iwl-xxxx-hw.h for hardware-related definitions. diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 423d3c396b2d..75e7665773c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -619,7 +619,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; break; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 9d55ece05020..80475c7a6fba 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -3,7 +3,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018, 2020 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portionhelp of the ieee80211 subsystem header files. 
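Interleaved with the license cleanup, every old /* fall through */ marker in the dvm code becomes the fallthrough pseudo-keyword (here in iwlagn_mac_set_key(), with more below in rx.c, scan.c, sta.c and tx.c). The keyword maps to __attribute__((__fallthrough__)) on compilers that support it, so -Wimplicit-fallthrough can be enforced even by compilers that, unlike GCC, do not inspect comments. A stand-alone sketch; the fallback definition stands in for the kernel's <linux/compiler_attributes.h>:

        #include <stdio.h>

        #if defined(__has_attribute)
        # if __has_attribute(__fallthrough__)
        #  define fallthrough   __attribute__((__fallthrough__))
        # endif
        #endif
        #ifndef fallthrough
        # define fallthrough    do {} while (0) /* no-op on older compilers */
        #endif

        static const char *wep_label(int wep104)
        {
                switch (wep104) {
                case 1:
                        puts("setting 128-bit key-size flag");
                        fallthrough;    /* replaces the old fall-through comment */
                case 0:
                        return "WEP";
                default:
                        return "not WEP";
                }
        }

        int main(void)
        {
                puts(wep_label(1));
                return 0;
        }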
@@ -582,7 +582,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; - /* fall through */ + fallthrough; case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_ICV_MIC) { @@ -591,7 +591,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "Packet destroyed\n"); return -1; } - /* fall through */ + fallthrough; case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { @@ -720,7 +720,7 @@ static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; break; } - /* fall through */ + fallthrough; default: if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; @@ -786,7 +786,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, struct iwl_rx_phy_res *phy_res; __le32 rx_pkt_status; struct iwl_rx_mpdu_res_start *amsdu; - u32 len; + u32 len, pkt_len = iwl_rx_packet_len(pkt); u32 ampdu_status; u32 rate_n_flags; @@ -794,10 +794,22 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv, IWL_ERR(priv, "MPDU frame without cached PHY data\n"); return; } + + if (unlikely(pkt_len < sizeof(*amsdu))) { + IWL_DEBUG_DROP(priv, "Bad REPLY_RX_MPDU_CMD size\n"); + return; + } + phy_res = &priv->last_phy_res; amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data; header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu)); len = le16_to_cpu(amsdu->byte_count); + + if (unlikely(len + sizeof(*amsdu) + sizeof(__le32) > pkt_len)) { + IWL_DEBUG_DROP(priv, "FW lied about packet len\n"); + return; + } + rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len); ampdu_status = iwlagn_translate_rx_status(priv, le32_to_cpu(rx_pkt_status)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 832fcbb787e9..c4ecf6ed2186 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -405,7 +405,7 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; limit /= 2; dwell_time = min(limit, dwell_time); - /* fall through */ + fallthrough; case 1: limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; limit /= n_active; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c index e622948661fa..ddc14059b07d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c @@ -1109,7 +1109,7 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv, break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_WEP40: key_flags |= STA_KEY_FLG_WEP; memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index e3962bb52328..847b8e07f81c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -210,7 +210,7 @@ static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv, case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); diff 
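The one behavioural fix in this block is in iwlagn_rx_reply_rx(): the driver previously trusted the firmware-supplied byte_count without checking it against the real notification length, so a misbehaving device could push the trailing rx_pkt_status read past the end of the receive buffer. A minimal user-space sketch of the added pattern; the struct layout and names are illustrative rather than the driver's, and the test packet assumes a little-endian host:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        struct mpdu_res_start {
                uint16_t byte_count;
                uint16_t reserved;
        };

        /* Returns 0 when the device-supplied length is consistent, -1 otherwise. */
        static int parse_rx(const uint8_t *data, size_t pkt_len)
        {
                struct mpdu_res_start amsdu;
                uint32_t status;

                if (pkt_len < sizeof(amsdu))
                        return -1;      /* truncated header */
                memcpy(&amsdu, data, sizeof(amsdu));

                /* header + payload + trailing 32-bit status must all fit */
                if (sizeof(amsdu) + (size_t)amsdu.byte_count + sizeof(status) > pkt_len)
                        return -1;      /* firmware lied about the length */

                memcpy(&status, data + sizeof(amsdu) + amsdu.byte_count, sizeof(status));
                printf("frame of %u bytes, status 0x%08x\n",
                       (unsigned)amsdu.byte_count, (unsigned)status);
                return 0;
        }

        int main(void)
        {
                uint8_t pkt[16] = { 4, 0 };     /* byte_count = 4 (little endian) */

                return parse_rx(pkt, sizeof(pkt)) ? 1 : 0;
        }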
--git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 3e5a35e26ad3..15248b064380 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2019-2020 Intel Corporation + */ #include <linux/uuid.h> #include "iwl-drv.h" #include "iwl-debug.h" @@ -113,11 +58,11 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method) } IWL_EXPORT_SYMBOL(iwl_acpi_get_object); -/** -* Generic function for evaluating a method defined in the device specific -* method (DSM) interface. 
The returned acpi object must be freed by calling -* function. -*/ +/* + * Generic function for evaluating a method defined in the device specific + * method (DSM) interface. The returned acpi object must be freed by calling + * function. + */ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, union acpi_object *args) { @@ -134,7 +79,7 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, return obj; } -/** +/* * Evaluate a DSM with no arguments and a single u8 return value (inside a * buffer object), verify and return that value. */ @@ -229,8 +174,8 @@ found: IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg); int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - __le32 *black_list_array, - int *black_list_size) + __le32 *block_list_array, + int *block_list_size) { union acpi_object *wifi_pkg, *data; int ret, tbl_rev, i; @@ -257,7 +202,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, enabled = !!wifi_pkg->package.elements[0].integer.value; if (!enabled) { - *black_list_size = -1; + *block_list_size = -1; IWL_DEBUG_RADIO(fwrt, "TAS not enabled\n"); ret = 0; goto out_free; @@ -271,17 +216,17 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, ret = -EINVAL; goto out_free; } - *black_list_size = wifi_pkg->package.elements[1].integer.value; + *block_list_size = wifi_pkg->package.elements[1].integer.value; - IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *black_list_size); - if (*black_list_size > APCI_WTAS_BLACK_LIST_MAX) { + IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size); + if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) { IWL_DEBUG_RADIO(fwrt, "TAS invalid array size value %u\n", - *black_list_size); + *block_list_size); ret = -EINVAL; goto out_free; } - for (i = 0; i < *black_list_size; i++) { + for (i = 0; i < *block_list_size; i++) { u32 country; if (wifi_pkg->package.elements[2 + i].type != @@ -293,8 +238,8 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, } country = wifi_pkg->package.elements[2 + i].integer.value; - black_list_array[i] = cpu_to_le32(country); - IWL_DEBUG_RADIO(fwrt, "TAS black list country %d\n", country); + block_list_array[i] = cpu_to_le32(country); + IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country); } ret = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index bddf8a44e163..042dd247d387 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
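Besides the terminology change (black list becomes block list, in line with the kernel-wide move to inclusive naming), the iwl_acpi_get_tas() hunks show the defensive shape the parser keeps: read the advertised entry count, bound it by the fixed maximum, and only then copy that many country codes out of the ACPI package. Roughly, under illustrative names and with the package flattened to a plain integer array:

        #include <errno.h>
        #include <stddef.h>
        #include <stdint.h>

        #define BLOCK_LIST_MAX  16      /* mirrors APCI_WTAS_BLACK_LIST_MAX */

        /* pkg[0] = enabled flag, pkg[1] = entry count, pkg[2..] = country codes */
        static int get_block_list(const uint32_t *pkg, size_t n_elems,
                                  uint32_t *block_list, int *block_list_size)
        {
                size_t i, count;

                if (n_elems < 2)
                        return -EINVAL;
                if (!pkg[0]) {
                        *block_list_size = -1;  /* TAS not enabled */
                        return 0;
                }
                count = pkg[1];
                if (count > BLOCK_LIST_MAX || n_elems < 2 + count)
                        return -EINVAL;         /* count inconsistent with data */
                for (i = 0; i < count; i++)
                        block_list[i] = pkg[2 + i];
                *block_list_size = (int)count;
                return 0;
        }

        int main(void)
        {
                const uint32_t pkg[] = { 1, 2, 840, 124 };      /* enabled, 2 entries */
                uint32_t list[BLOCK_LIST_MAX];
                int n = 0;

                return get_block_list(pkg, 4, list, &n) ? 1 : 0;
        }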
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ @@ -100,7 +46,7 @@ #define ACPI_ECKV_WIFI_DATA_SIZE 2 /* - * 1 type, 1 enabled, 1 black list size, 16 black list array + * 1 type, 1 enabled, 1 block list size, 16 block list array */ #define APCI_WTAS_BLACK_LIST_MAX 16 #define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX) @@ -197,8 +143,8 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt); int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt, struct iwl_per_chain_offset *table, u32 n_bands); -int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array, - int *black_list_size); +int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array, + int *block_list_size); #else /* CONFIG_ACPI */ @@ -269,8 +215,8 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) } static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, - __le32 *black_list_array, - int *black_list_size) + __le32 *block_list_array, + int *block_list_size) { return -ENOENT; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index a1cac47395bc..c840a97e6a62 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
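The matching acpi.h hunk updates the comment documenting where ACPI_WTAS_WIFI_DATA_SIZE comes from: one type element, one enabled flag, one block-list size, then up to sixteen block-list entries. The arithmetic can be pinned down with a compile-time check; a throwaway illustration, not code from the patch:

        /* 1 type + 1 enabled + 1 block-list size, then 16 block-list entries */
        #define APCI_WTAS_BLACK_LIST_MAX        16
        #define ACPI_WTAS_WIFI_DATA_SIZE        (3 + APCI_WTAS_BLACK_LIST_MAX)

        _Static_assert(ACPI_WTAS_WIFI_DATA_SIZE == 19,
                       "3 metadata elements plus 16 country entries");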
- * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_alive_h__ #define __iwl_fw_api_alive_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h index 6cb22a9a9380..29e2816e7052 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_binding_h__ #define __iwl_fw_api_binding_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h index ea4a3f04a83a..d130d4f85444 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_cmdhdr_h__ #define __iwl_fw_api_cmdhdr_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 68060085010f..01580c9175f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2013-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_coex_h__ #define __iwl_fw_api_coex_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 8cc36dbb2311..b916b38b3092 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #ifndef __iwl_fw_api_commands_h__ #define __iwl_fw_api_commands_h__ @@ -323,7 +267,7 @@ enum iwl_legacy_cmds { /** * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: - * update scan offload (scheduled scan) profiles/blacklist/etc. + * update scan offload (scheduled scan) profiles/blocklist/etc. */ SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h index 546fa60ed9fd..1ab92f62c414 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_config_h__ #define __iwl_fw_api_config_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h index 2f0d7c498b3e..105ba7170c3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_context_h__ #define __iwl_fw_api_context_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 5db301a6a312..758639084e0c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_d3_h__ #define __iwl_fw_api_d3_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 93c06e6c1ced..b472f08b06e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_datapath_h__ #define __iwl_fw_api_datapath_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 95ada51d3f9e..996d5cc5bd9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2018-2020 Intel Corporation + */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index 94b1a1268476..ace0ef46001a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_debug_h__ #define __iwl_fw_api_debug_h__ @@ -79,6 +24,12 @@ enum iwl_debug_cmds { */ UMAC_RD_WR = 0x1, /** + * @HOST_EVENT_CFG: + * updates the enabled event severities + * &struct iwl_dbg_host_event_cfg_cmd + */ + HOST_EVENT_CFG = 0x3, + /** * @DBGC_SUSPEND_RESUME: * DBGC suspend/resume commad. Uses a single dword as data: * 0 - resume DBGC recording @@ -395,4 +346,12 @@ struct iwl_buf_alloc_cmd { struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */ +/** + * struct iwl_dbg_host_event_cfg_cmd + * @enabled_severities: enabled severities + */ +struct iwl_dbg_host_event_cfg_cmd { + __le32 enabled_severities; +} __packed; /* DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h index befc3b126041..dd62a63956b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
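
The debug.h hunk above adds the HOST_EVENT_CFG command (0x3 in the debug group) and its one-dword payload, struct iwl_dbg_host_event_cfg_cmd. Below is a minimal sketch of a driver-side sender, assuming the existing iwlwifi command plumbing (WIDE_ID(), DEBUG_GROUP, struct iwl_host_cmd, iwl_trans_send_cmd()); the helper name and the caller-provided severity mask are illustrative assumptions, not taken from this series.

/*
 * Hedged sketch, not part of this diff: one plausible way to send the
 * new HOST_EVENT_CFG command. The command id, group and payload struct
 * come from the hunk above; the helper name and call site are assumed.
 */
static int iwl_dbg_send_host_event_cfg(struct iwl_trans *trans,
				       u32 enabled_severities)
{
	struct iwl_dbg_host_event_cfg_cmd event_cfg = {
		/* firmware expects little-endian on the wire */
		.enabled_severities = cpu_to_le32(enabled_severities),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DEBUG_GROUP, HOST_EVENT_CFG),
		.data[0] = &event_cfg,
		.len[0] = sizeof(event_cfg),
	};

	return iwl_trans_send_cmd(trans, &hcmd);
}

Per the hunk's trailing comment the payload matches DEBUG_HOST_EVENT_CFG_CMD_API_S_VER_1, so the struct is sent as-is with no trailing data.
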
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_filter_h__ #define __iwl_fw_api_filter_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h index b30c9d229d6e..475bb4640ea7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h @@ -1,61 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
- * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_led_h__ #define __iwl_fw_api_led_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 465a8e3974e8..28aa28138908 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index e7a1acedbcf1..6610d1234f74 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_mac_cfg_h__ #define __iwl_fw_api_mac_cfg_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 260f9978a6ef..2d03d7bb5da5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_mac_h__ #define __iwl_fw_api_mac_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 55573168444e..b2706209b7d7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_nvm_reg_h__ #define __iwl_fw_api_nvm_reg_h__ @@ -449,12 +391,12 @@ enum iwl_mcc_source { #define IWL_TAS_BLACK_LIST_MAX 16 /** * struct iwl_tas_config_cmd - configures the TAS - * @black_list_size: size of relevant field in black_list_array - * @black_list_array: black list countries (without TAS) + * @block_list_size: size of relevant field in block_list_array + * @block_list_array: block list countries (without TAS) */ struct iwl_tas_config_cmd { - __le32 black_list_size; - __le32 black_list_array[IWL_TAS_BLACK_LIST_MAX]; + __le32 block_list_size; + __le32 block_list_array[IWL_TAS_BLACK_LIST_MAX]; } __packed; /* TAS_CONFIG_CMD_API_S_VER_2 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 53cab993068f..f06214d418aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
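
The nvm-reg.h hunk above renames the TAS command fields from black_list_* to block_list_* while keeping the array bound's old name, IWL_TAS_BLACK_LIST_MAX. A short sketch of filling the renamed fields follows, assuming a caller-supplied country (MCC) list; in the driver the list actually comes from platform tables, and the send path is omitted here.

/*
 * Hedged sketch, not part of this diff: populating the renamed TAS
 * fields. Only the struct layout and the IWL_TAS_BLACK_LIST_MAX bound
 * are taken from the hunk above; the helper and its inputs are assumed.
 */
static void iwl_tas_fill_cmd(struct iwl_tas_config_cmd *cmd,
			     const u16 *mcc_list, size_t n)
{
	size_t i;

	/* clamp to the array bound, which keeps its pre-rename name */
	if (n > IWL_TAS_BLACK_LIST_MAX)
		n = IWL_TAS_BLACK_LIST_MAX;

	cmd->block_list_size = cpu_to_le32(n);
	for (i = 0; i < n; i++)
		cmd->block_list_array[i] = cpu_to_le32(mcc_list[i]);
}
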
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_offload_h__ #define __iwl_fw_api_offload_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h index 721b9fed7201..db59a1897fb1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_paging_h__ #define __iwl_fw_api_paging_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index e6a069683462..68b788b92b7a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,71 +1,16 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_phy_ctxt_h__ #define __iwl_fw_api_phy_ctxt_h__ /* Supported bands */ #define PHY_BAND_5 (0) #define PHY_BAND_24 (1) +#define PHY_BAND_6 (2) /* Supported channel width, vary if there is VHT support */ #define PHY_VHT_CHANNEL_MODE20 (0x0) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 0debca6dd037..d07a632f1af7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. 
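
The phy-ctxt.h hunk above extends the supported-band defines with PHY_BAND_6 (6 GHz) alongside the existing 2.4 and 5 GHz values. A sketch of the kind of mapping a driver needs from mac80211 band enums to these firmware values; the helper name is an assumption, and NL80211_BAND_* come from cfg80211.

/*
 * Hedged sketch, not part of this diff: translating nl80211 bands to
 * the firmware PHY band values defined in the hunk above.
 */
static u8 iwl_phy_band_from_nl80211(enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ:
		return PHY_BAND_24;
	case NL80211_BAND_5GHZ:
		return PHY_BAND_5;
	case NL80211_BAND_6GHZ:
		return PHY_BAND_6;
	default:
		WARN_ON_ONCE(1);
		return PHY_BAND_5;
	}
}
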
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_phy_h__ #define __iwl_fw_api_phy_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 4e6ad1793d0a..798417182d54 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_power_h__ #define __iwl_fw_api_power_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 1ea54f643030..fc2fa49e9825 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -1,64 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_rs_h__ #define __iwl_fw_api_rs_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 8a8a204bfe26..821ed472ccff 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_rx_h__ #define __iwl_fw_api_rx_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5cc33a1b7172..931c0f48de99 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_scan_h__ #define __iwl_fw_api_scan_h__ @@ -117,12 +59,12 @@ enum scan_framework_client { }; /** - * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S + * struct iwl_scan_offload_blocklist - SCAN_OFFLOAD_BLACKLIST_S * @ssid: MAC address to filter out * @reported_rssi: AP rssi reported to the host * @client_bitmap: clients ignore this entry - enum scan_framework_client */ -struct iwl_scan_offload_blacklist { +struct iwl_scan_offload_blocklist { u8 ssid[ETH_ALEN]; u8 reported_rssi; u8 client_bitmap; @@ -162,7 +104,7 @@ struct iwl_scan_offload_profile { /** * struct iwl_scan_offload_profile_cfg_data - * @blacklist_len: length of blacklist + * @blocklist_len: length of blocklist * @num_profiles: num of profiles in the list * @match_notify: clients waiting for match found notification * @pass_match: clients waiting for the results @@ -171,7 +113,7 @@ struct iwl_scan_offload_profile { * @reserved: reserved */ struct iwl_scan_offload_profile_cfg_data { - u8 blacklist_len; + u8 blocklist_len; u8 num_profiles; u8 match_notify; u8 pass_match; @@ -530,6 +472,11 @@ enum iwl_channel_flags { IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), }; +enum iwl_uhb_chan_cfg_flags { + IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES = BIT(24), + IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN = BIT(25), + IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE = BIT(26), +}; /** * struct iwl_scan_dwell * @active: default dwell time for active scan diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h index e517b55f1bc6..be4ff79fa1bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. 
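The scan.h hunks above do two things: they rename the scan-offload "blacklist" structures to "blocklist" (the wire layout is unchanged; only the C identifiers and kernel-doc move), and they introduce iwl_uhb_chan_cfg_flags, per-channel configuration bits for UHB (ultra-high-band, i.e. 6 GHz) scanning. A minimal illustration of how such flag bits combine, using a locally defined BIT() macro so it compiles outside the kernel; the UHB_FLAG_* names are stand-ins for the enum values added above:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* mirrors iwl_uhb_chan_cfg_flags from the hunk above */
#define UHB_FLAG_UNSOLICITED_PROBE_RES	BIT(24)
#define UHB_FLAG_PSC_CHAN_NO_LISTEN	BIT(25)
#define UHB_FLAG_FORCE_PASSIVE		BIT(26)

int main(void)
{
	uint32_t chan_cfg = 0;

	/* e.g. scan a PSC channel passively, without active probing */
	chan_cfg |= UHB_FLAG_PSC_CHAN_NO_LISTEN | UHB_FLAG_FORCE_PASSIVE;

	printf("channel config flags: 0x%08x\n", (unsigned int)chan_cfg);
	return 0;
}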
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_sf_h__ #define __iwl_fw_api_sf_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h index 0c6d7b3e1324..c5df1171462b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/soc.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2019 - 2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_soc_h__ #define __iwl_fw_api_soc_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index 052413eef059..12b2f2c48387 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012-2014, 2018 - 2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_sta_h__ #define __iwl_fw_api_sta_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index d41cab4016fe..18cca15caa3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018, 2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_stats_h__ #define __iwl_fw_api_stats_h__ #include "mac.h" @@ -95,7 +40,7 @@ struct mvm_statistics_div { * @interference_data_flag: flag for interference data availability. 1 when data * is available. * @channel_load: counts RX Enable time in uSec - * @beacon_rssi_a: beacon RSSI on anntena A + * @beacon_rssi_a: beacon RSSI on antenna A * @beacon_rssi_b: beacon RSSI on antenna B * @beacon_rssi_c: beacon RSSI on antenna C * @beacon_energy_a: beacon energy on antenna A diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index b089285ac466..14d35000abed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_tdls_h__ #define __iwl_fw_api_tdls_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 53b438d709db..904cd78a9fa0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_time_event_h__ #define __iwl_fw_api_time_event_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index de2e2ca7a3ea..b2d8ccf5f5dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_tx_h__ #define __iwl_fw_api_tx_h__ #include <linux/ieee80211.h> diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 75d958bab0e3..8b3a00df41da 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2019-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_api_txq_h__ #define __iwl_fw_api_txq_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index ab4a8b942c81..0f0a6727701b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <linux/devcoredump.h> #include "iwl-drv.h" #include "runtime.h" @@ -1066,9 +1009,10 @@ struct iwl_dump_ini_region_data { struct iwl_fwrt_dump_data *dump_data; }; -static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt, - struct iwl_dump_ini_region_data *reg_data, - void *range_ptr, int idx) +static int +iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, int idx) { struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; struct iwl_fw_ini_error_dump_range *range = range_ptr; @@ -1090,6 +1034,58 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt, return sizeof(*range) + le32_to_cpu(range->range_data_size); } +static int +iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime *fwrt, + struct iwl_dump_ini_region_data *reg_data, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + u32 indirect_wr_addr = WMAL_INDRCT_RD_CMD1; + u32 indirect_rd_addr = WMAL_MRSPF_1; + u32 prph_val; + u32 addr = le32_to_cpu(reg->addrs[idx]); + u32 dphy_state; + u32 dphy_addr; + unsigned long flags; + int i; + + range->internal_base_addr = cpu_to_le32(addr); + range->range_data_size = reg->dev_addr.size; + + if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + indirect_wr_addr = WMAL_INDRCT_CMD1; + + indirect_wr_addr += le32_to_cpu(reg->dev_addr.offset); + indirect_rd_addr += le32_to_cpu(reg->dev_addr.offset); + + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return -EBUSY; + + dphy_addr = (reg->dev_addr.offset) ? 
WFPM_LMAC2_PS_CTL_RW : + WFPM_LMAC1_PS_CTL_RW; + dphy_state = iwl_read_umac_prph_no_grab(fwrt->trans, dphy_addr); + + for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { + if (dphy_state == HBUS_TIMEOUT || + (dphy_state & WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK) != + WFPM_PHYRF_STATE_ON) { + *val++ = cpu_to_le32(WFPM_DPHY_OFF); + continue; + } + + iwl_write_prph_no_grab(fwrt->trans, indirect_wr_addr, + WMAL_INDRCT_CMD(addr + i)); + prph_val = iwl_read_prph_no_grab(fwrt->trans, + indirect_rd_addr); + *val++ = cpu_to_le32(prph_val); + } + + iwl_trans_release_nic_access(fwrt->trans, &flags); + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, struct iwl_dump_ini_region_data *reg_data, void *range_ptr, int idx) @@ -1609,6 +1605,11 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, data->write_ptr = iwl_get_mon_reg(fwrt, alloc_id, &addrs->write_ptr); + if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + u32 wrt_ptr = le32_to_cpu(data->write_ptr); + + data->write_ptr = cpu_to_le32(wrt_ptr >> 2); + } data->cycle_cnt = iwl_get_mon_reg(fwrt, alloc_id, &addrs->cycle_cnt); data->cur_frag = iwl_get_mon_reg(fwrt, alloc_id, @@ -1923,7 +1924,7 @@ struct iwl_dump_ini_mem_ops { * * @fwrt: fw runtime struct * @list: list to add the dump tlv to - * @reg: memory region + * @reg_data: memory region * @ops: memory dump operations */ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list, @@ -2152,9 +2153,14 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { .get_num_of_ranges = iwl_dump_ini_mem_ranges, .get_size = iwl_dump_ini_mem_get_size, .fill_mem_hdr = iwl_dump_ini_mem_fill_header, - .fill_range = iwl_dump_ini_prph_iter, + .fill_range = iwl_dump_ini_prph_mac_iter, + }, + [IWL_FW_INI_REGION_PERIPHERY_PHY] = { + .get_num_of_ranges = iwl_dump_ini_mem_ranges, + .get_size = iwl_dump_ini_mem_get_size, + .fill_mem_hdr = iwl_dump_ini_mem_fill_header, + .fill_range = iwl_dump_ini_prph_phy_iter, }, - [IWL_FW_INI_REGION_PERIPHERY_PHY] = {}, [IWL_FW_INI_REGION_PERIPHERY_AUX] = {}, [IWL_FW_INI_REGION_PAGING] = { .fill_mem_hdr = iwl_dump_ini_mem_fill_header, @@ -2188,6 +2194,7 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, struct list_head *list) { struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; + enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point); struct iwl_dump_ini_region_data reg_data = { .dump_data = dump_data, }; @@ -2218,6 +2225,14 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; + if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY && + tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) { + IWL_WARN(fwrt, + "WRT: trying to collect phy prph at time point: %d, skipping\n", + tp_id); + continue; + } + size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[reg_type]); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 11558df36b94..49fa2f5f8c7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. 
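The dbg.c hunks above split the old iwl_dump_ini_prph_iter into a MAC variant and a new iwl_dump_ini_prph_phy_iter. The PHY variant grabs NIC access, samples the DPHY power state via WFPM_LMAC1/2_PS_CTL_RW, and for each dword either records the WFPM_DPHY_OFF marker (when the PHY is powered down) or posts the target address to an indirect command register (WMAL_INDRCT_CMD1 or WMAL_INDRCT_RD_CMD1, depending on device family) and reads the value back from the WMAL_MRSPF_1-based response register. Two related changes ride along: on AX210 and later the monitor write pointer is converted from bytes to dwords (wrt_ptr >> 2), and the trigger path now skips PHY-periphery collection outside the FW-assert time point. A simplified user-space model of the indirect read loop, assuming a fake two-register "device"; the register indices and the 0xA5 response pattern are illustrative only, not real iwlwifi addresses:

#include <stdint.h>
#include <stdio.h>

#define REG_INDIRECT_CMD	0	/* post target address here */
#define REG_INDIRECT_RSP	1	/* resolved value appears here */
#define REG_SPACE_WORDS		16

static uint32_t regs[REG_SPACE_WORDS];

static void reg_write(uint32_t reg, uint32_t val)
{
	regs[reg] = val;
	/* model the hardware resolving the indirect access */
	if (reg == REG_INDIRECT_CMD)
		regs[REG_INDIRECT_RSP] = 0xA5000000u | val;
}

static uint32_t reg_read(uint32_t reg)
{
	return regs[reg];
}

int main(void)
{
	uint32_t base = 0x100, size = 16;	/* size in bytes, as in reg->dev_addr.size */

	/* walk the region a dword at a time, like the fill_range iterator */
	for (uint32_t i = 0; i < size; i += 4) {
		reg_write(REG_INDIRECT_CMD, base + i);
		printf("0x%03x -> 0x%08x\n",
		       (unsigned int)(base + i),
		       (unsigned int)reg_read(REG_INDIRECT_RSP));
	}
	return 0;
}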
All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_dbg_h__ #define __iwl_fw_dbg_h__ #include <linux/workqueue.h> diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 267ad4eddb5c..a152ce306475 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include "api/commands.h" #include "debugfs.h" #include "dbg.h" @@ -200,6 +145,34 @@ static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt) return iwl_trans_send_cmd(fwrt->trans, &hcmd); } +static int iwl_dbgfs_enabled_severities_write(struct iwl_fw_runtime *fwrt, + char *buf, size_t count) +{ + struct iwl_dbg_host_event_cfg_cmd event_cfg; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(HOST_EVENT_CFG, DEBUG_GROUP, 0), + .flags = CMD_ASYNC, + .data[0] = &event_cfg, + .len[0] = sizeof(event_cfg), + }; + u32 enabled_severities; + int ret = kstrtou32(buf, 10, &enabled_severities); + + if (ret < 0) + return ret; + + event_cfg.enabled_severities = cpu_to_le32(enabled_severities); + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + IWL_INFO(fwrt, + "sent host event cfg with enabled_severities: %u, ret: %d\n", + enabled_severities, ret); + + return ret ?: count; +} + +FWRT_DEBUGFS_WRITE_FILE_OPS(enabled_severities, 16); + static void iwl_fw_timestamp_marker_wk(struct work_struct *work) { int ret; @@ -431,5 +404,6 @@ void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_info, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); + FWRT_DEBUGFS_ADD_FILE(enabled_severities, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0400); } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index fde40ff88451..0248d40bc233 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. 
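The debugfs.c hunk above adds an enabled_severities write handler: it parses a decimal u32 from the user buffer with kstrtou32, sends it to the firmware in an asynchronous HOST_EVENT_CFG command, and finishes with "return ret ?: count;" — the GNU conditional-with-omitted-operand, i.e. the byte count consumed when ret is zero, otherwise the error code. A rough user-space sketch of that parse-and-return convention, with hypothetical names; it stands in for the kstrtou32 step only and sends nothing anywhere:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* returns bytes consumed on success, negative errno on failure,
 * mirroring the kernel debugfs write-handler convention */
static int parse_severities(const char *buf, size_t count, uint32_t *out)
{
	char *end;
	unsigned long v = strtoul(buf, &end, 10);

	if (end == buf || v > UINT32_MAX)
		return -EINVAL;
	*out = (uint32_t)v;
	return (int)count;
}

int main(void)
{
	uint32_t sev;
	int ret = parse_severities("5\n", 2, &sev);

	if (ret < 0)
		fprintf(stderr, "parse failed: %d\n", ret);
	else
		printf("enabled_severities=%u (consumed %d bytes)\n", sev, ret);
	return 0;
}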
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include "runtime.h" #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index cb40f509ab61..9fffac903b93 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2014, 2018-2020 Intel Corporation + * Copyright (C) 2014-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __fw_error_dump_h__ #define __fw_error_dump_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 02c64b988a13..597bc88479ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2008-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fw_file_h__ #define __iwl_fw_file_h__ @@ -220,7 +165,7 @@ struct iwl_ucode_capa { * treats good CRC threshold as a boolean * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: This uCode image supports uAPSD - * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan + * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of block list instead of 64 in scan * offload profile config command. * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six * (rather than two) IPv6 addresses @@ -288,7 +233,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S. * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of * STA_CONTEXT_DOT11AX_API_S - * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar + * @IWL_UCODE_TLV_API_SAR_TABLE_VER: This ucode supports different sar * version tables. * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of * SCAN_CONFIG_DB_CMD_API_S. 
@@ -414,6 +359,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * initiator and responder * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames + * @IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE: Supports the firmware handshake in + * reset flow * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -459,6 +406,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54, IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56, + IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57, /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, @@ -486,6 +434,11 @@ enum iwl_ucode_tlv_capa { /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, + /* + * @IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT: supports PSC channels + */ + IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98, + NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ /* sparse says it cannot increment the previous enum member */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index f836f3a8567b..c93d247621ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
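The capability values added above, IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE at 57 and IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT at 98, are bit indices into the firmware capability bitmap, which is why the enum is grouped into "sets": set 0 covers bits 0-31, set 1 bits 32-63, set 2 bits 64-95 and set 3 bits 96-127. A minimal sketch of a driver-side check follows, assuming the existing fw_has_capa() helper from fw/img.h; the wrapper function itself is made up for illustration.

/*
 * Sketch, not code from this patch: gate a feature on a firmware
 * capability bit. fw_has_capa() is the existing bitmap test helper.
 */
static bool iwl_example_fw_can_reset_handshake(const struct iwl_fw *fw)
{
	return fw_has_capa(&fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
}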
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016 Intel Deutschland GmbH + */ #ifndef __iwl_fw_img_h__ #define __iwl_fw_img_h__ #include <linux/types.h> diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index f8516c7ca767..e317b051b8ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2019-2020 Intel Corporation + */ #include "iwl-drv.h" #include "runtime.h" #include "dbg.h" diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c index 379735e086dc..3dbc6f3f92cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <linux/sched.h> #include <linux/export.h> diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h index 61b067eeeac9..49e8ba11b6a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h @@ -1,61 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_notif_wait_h__ #define __iwl_notif_wait_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 2bd76bd9dfa5..4a8fe9641a32 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include "iwl-drv.h" #include "runtime.h" #include "fw/api/commands.h" diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index cddcb4d9a264..0dba5444f2db 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ + */ #ifndef __iwl_fw_runtime_h__ #define __iwl_fw_runtime_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 700fdab14209..f2f1789f470d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include "iwl-drv.h" #include "runtime.h" #include "fw/api/commands.h" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h index 359537620c93..ee85f460ae25 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ /* * Please use this file (iwl-agn-hw.h) only for hardware-related definitions. */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 580b07a43856..27cb0406ba9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright (C) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright (C) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -376,7 +322,7 @@ struct iwl_fw_mon_regs { * mode set * @nvm_hw_section_num: the ID of the HW NVM section * @mac_addr_from_csr: read HW address from CSR registers - * @features: hw features, any combination of feature_whitelist + * @features: hw features, any combination of feature_passlist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the @@ -472,6 +418,7 @@ struct iwl_cfg { #define IWL_CFG_MAC_TYPE_QU 0x33 #define IWL_CFG_MAC_TYPE_QUZ 0x35 #define IWL_CFG_MAC_TYPE_QNJ 0x36 +#define IWL_CFG_MAC_TYPE_SNJ 0x42 #define IWL_CFG_MAC_TYPE_MA 0x44 #define IWL_CFG_RF_TYPE_TH 0x105 @@ -653,6 +600,7 @@ extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0; extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0; extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0; extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0; +extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index 5624fe42efd9..2be605cc6fbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,53 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2018, 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2018, 2020 Intel Corporation - * All rights reserved. 
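IWL_CFG_MAC_TYPE_SNJ above extends the MAC-type namespace used when matching a probed device to a configuration such as the new iwl_cfg_snj_a0_mr_a0. The MAC type is derived from the hardware revision register; a hedged sketch of that comparison follows, where CSR_HW_REV_TYPE() is the existing extraction macro from iwl-csr.h and the helper function is illustrative only.

/*
 * Illustrative sketch: decide whether a probed device's hardware
 * revision carries a given MAC type. Only CSR_HW_REV_TYPE() and the
 * IWL_CFG_MAC_TYPE_* constants come from the driver; the function
 * itself is made up.
 */
static bool iwl_example_is_mac_type(u32 hw_rev, u16 mac_type)
{
	/* CSR_HW_REV_TYPE() masks out the type field of the revision */
	return CSR_HW_REV_TYPE(hw_rev) == mac_type; /* e.g. IWL_CFG_MAC_TYPE_SNJ */
}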
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2018, 2020 Intel Corporation + */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -98,6 +52,7 @@ enum iwl_prph_scratch_mtr_format { * appropriately; use the below values for this. * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size + * @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size */ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), @@ -111,6 +66,7 @@ enum iwl_prph_scratch_flags { IWL_PRPH_SCRATCH_RB_SIZE_EXT_MASK = 0xf << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20, IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20, + IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20, }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index 76b7bbdf8393..4354d5acac9f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h @@ -1,56 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
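The extended RB-size values above live in a 4-bit field at bits 20-23 of the prph scratch control flags, as the 0xf << 20 mask shows, so the new 16 kB size simply takes encoding 10 after 8 kB (8) and 12 kB (9). Below is a small kernel-style sketch of mapping a buffer size to the flag; the enum values come from this header, while the helper itself is illustrative.

/*
 * Sketch only: translate a receive-buffer size in kB into the
 * extended RB-size field of the prph scratch control flags.
 */
static u32 iwl_example_rb_size_ext_flag(unsigned int rb_kb)
{
	switch (rb_kb) {
	case 8:
		return IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K;  /* 8 << 20 */
	case 12:
		return IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K; /* 9 << 20 */
	case 16:
		return IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; /* 10 << 20 */
	default:
		return 0; /* leave the field clear for the default size */
	}
}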
- * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #ifndef __iwl_context_info_file_h__ #define __iwl_context_info_file_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 1d48c7d7fffd..fe0c03c8d390 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2016 Intel Deutschland GmbH + */ #ifndef __iwl_csr_h__ #define __iwl_csr_h__ /* @@ -638,7 +582,8 @@ enum msix_fh_int_causes { enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), - MSIX_HW_INT_CAUSES_REG_IML = BIT(2), + MSIX_HW_INT_CAUSES_REG_IML = BIT(1), + MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 51ce93d21ffe..a654147d3cd6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1,64 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. 
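In the MSI-X hunk above, the IML cause now shares BIT(1) with WAKEUP while the new RESET_DONE cause takes over BIT(2), matching the IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE capability added earlier in file.h. Because two enum names alias one bit, a handler presumably disambiguates by boot stage rather than by bit value; the sketch below is illustrative only, with just the enum values taken from this header.

/*
 * Illustrative dispatch over the hardware-cause bits. The shared
 * WAKEUP/IML bit plausibly means "image loader" while the IML runs
 * and "wakeup" once the runtime firmware is up; every name other
 * than the MSIX_HW_INT_CAUSES_REG_* values is invented.
 */
static void iwl_example_handle_hw_causes(u32 inta_hw, bool in_iml)
{
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) /* aliases ..._REG_IML */
		pr_debug("%s interrupt\n", in_iml ? "IML" : "wakeup");

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
		pr_debug("firmware reset handshake completed\n");
}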
- * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2018-2020 Intel Corporation + */ #include <linux/firmware.h> #include "iwl-drv.h" #include "iwl-trans.h" @@ -808,7 +751,7 @@ static bool is_trig_data_contained(struct iwl_ucode_tlv *new, struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data; __le32 *new_data = new_trig->data, *old_data = old_trig->data; u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data); - u32 old_dwords_num = iwl_tlv_array_len(new, new_trig, data); + u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data); int i, j; for (i = 0; i < new_dwords_num; i++) { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h index 1360676b3b21..246823878281 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -1,63 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright (C) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
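The one-line fix in is_trig_data_contained() above matters because old_dwords_num was previously computed from the new trigger, so the containment loop walked the old trigger's data with the wrong length. Reduced to its essentials, the corrected check asks whether every dword of the new trigger data already appears in the old one. A self-contained, generic sketch of that logic follows, using plain C types rather than the driver's TLV structures; it is not the driver function itself.

/*
 * Generic form of the containment test: true iff every element of
 * new_data[] occurs somewhere in old_data[].
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool dwords_contained(const uint32_t *new_data, size_t new_num,
			     const uint32_t *old_data, size_t old_num)
{
	for (size_t i = 0; i < new_num; i++) {
		bool match = false;

		for (size_t j = 0; j < old_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false; /* with the bug, old_num held new's length here */
	}
	return true;
}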
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright (C) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2018-2019 Intel Corporation + */ #ifndef __iwl_dbg_tlv_h__ #define __iwl_dbg_tlv_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c index 7df173cc9ddc..f6ca2fc37c40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c @@ -1,61 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2011 Intel Corporation + */ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/export.h> diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 9dcd2e990c9c..d44bc61c34f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> @@ -1579,7 +1524,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) break; default: WARN(1, "Invalid fw type %d\n", fw->type); - /* fall through */ + fallthrough; case IWL_FW_MVM: op = &iwlwifi_opmode_table[MVM_OP_MODE]; break; @@ -1835,7 +1780,7 @@ MODULE_PARM_DESC(11n_disable, module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, "amsdu size 0: 12K for multi Rx queue devices, 2K for AX210 devices, " - "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)"); + "4K for other devices 1:4K 2:8K 3:12K (16K buffers) 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h index 8938a6467996..b6442df0c643 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + */ #ifndef __iwl_drv_h__ #define __iwl_drv_h__ #include <linux/export.h> diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index cf7e2a9232e5..c21062777caf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
- * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2019 Intel Corporation + * Copyright (C) 2015 Intel Mobile Communications GmbH + */ #include <linux/types.h> #include <linux/slab.h> #include <linux/export.h> @@ -324,15 +269,15 @@ enum iwl_eeprom_enhanced_txpwr_flags { }; /** - * iwl_eeprom_enhanced_txpwr structure + * struct iwl_eeprom_enhanced_txpwr * @flags: entry flags * @channel: channel number - * @chain_a_max_pwr: chain a max power in 1/2 dBm - * @chain_b_max_pwr: chain b max power in 1/2 dBm - * @chain_c_max_pwr: chain c max power in 1/2 dBm + * @chain_a_max: chain a max power in 1/2 dBm + * @chain_b_max: chain b max power in 1/2 dBm + * @chain_c_max: chain c max power in 1/2 dBm * @delta_20_in_40: 20-in-40 deltas (hi/lo) - * @mimo2_max_pwr: mimo2 max power in 1/2 dBm - * @mimo3_max_pwr: mimo3 max power in 1/2 dBm + * @mimo2_max: mimo2 max power in 1/2 dBm + * @mimo3_max: mimo3 max power in 1/2 dBm * * This structure presents the enhanced regulatory tx power limit layout * in an EEPROM image. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 03a748cc98fa..0efffb6eeb1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright (C) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright (C) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
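[The kernel-doc hunk for iwl_eeprom_enhanced_txpwr above changes the comment to use the "struct <name>" opener and to match the real member names (chain_a_max rather than chain_a_max_pwr), which is what scripts/kernel-doc validates against. A minimal well-formed example on an illustrative stand-in struct, not the driver's:]

/**
 * struct demo_txpwr - enhanced tx power entry (illustrative)
 * @flags: entry flags
 * @channel: channel number
 * @chain_a_max: chain a max power in 1/2 dBm
 */
struct demo_txpwr {
        unsigned char flags;
        unsigned char channel;
        unsigned char chain_a_max;
};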
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018 Intel Corporation + * Copyright (C) 2015 Intel Mobile Communications GmbH + */ #ifndef __iwl_eeprom_parse_h__ #define __iwl_eeprom_parse_h__ @@ -99,6 +44,17 @@ struct iwl_nvm_data { bool lar_enabled; bool vht160_supported; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; + + /* + * iftype data for low (2.4 GHz) and high (5 and 6 GHz) bands, + * we can use the same for 5 and 6 GHz bands because they have + * the same data + */ + struct { + struct ieee80211_sband_iftype_data low[2]; + struct ieee80211_sband_iftype_data high[2]; + } iftd; + struct ieee80211_channel channels[]; }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index ad6dc4497437..dbab2f10d750 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -1,61 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
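[The iwl_nvm_data hunk above adds per-device iftd.low/iftd.high storage; the iwl-nvm-parse.c changes later in this diff make the iwl_he_capa template const and copy it into that storage, so per-device fixups (such as the 1x1 NSTS tweaks) no longer modify shared global data. A small standalone sketch of this copy-then-patch pattern; struct caps, init_caps() and the field values are illustrative:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct caps { uint8_t phy[8]; };

/* shared read-only template, analogous to the const iwl_he_capa[] */
static const struct caps template = { .phy = { 0xff, 0xff } };

struct device_data { struct caps iftd; };

static void init_caps(struct device_data *dev, int nss)
{
        memcpy(&dev->iftd, &template, sizeof(template));
        if (nss < 2)            /* patch only the private copy */
                dev->iftd.phy[1] &= ~0x80;
}

int main(void)
{
        struct device_data a, b;

        init_caps(&a, 1);
        init_caps(&b, 2);
        /* prints "7f ff ff": the shared template stays intact */
        printf("%02x %02x %02x\n", a.iftd.phy[1], b.iftd.phy[1],
               template.phy[1]);
        return 0;
}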
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2019 Intel Corporation + */ #include <linux/types.h> #include <linux/slab.h> #include <linux/export.h> @@ -335,7 +281,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans, return -EINVAL; } -/** +/* * iwl_read_eeprom - read EEPROM contents * * Load the EEPROM contents from adapter and return it diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h index 47fced159800..63b8e6c6659b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ #ifndef __iwl_eeprom_h__ #define __iwl_eeprom_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index e77d8d13cb51..aaa3b65be4e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -1,64 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #ifndef __iwl_fh_h__ #define __iwl_fh_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 2139f0b8f2bb..2ac20d0a30eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,64 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(C) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2015-2016 Intel Deutschland GmbH + */ #include <linux/delay.h> #include <linux/device.h> #include <linux/export.h> @@ -279,7 +223,7 @@ void iwl_force_nmi(struct iwl_trans *trans) DEVICE_SET_NMI_VAL_DRV); else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, - UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); + UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); else iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, UREG_DOORBELL_TO_ISR6_NMI_BIT); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index f09e368c7040..39bceee4e9e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright (C) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright (C) 2018 - 2019 Intel Corporation - * All rights reserved. 
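[The iwl_force_nmi() hunk above corrects the value written for pre-AX210 families; the function selects both the doorbell register and the value to write by device family. A standalone sketch of that per-family dispatch; the family names, register offsets and bit values here are placeholders, not the real PRPH map:]

#include <stdint.h>
#include <stdio.h>

enum family { FAM_LEGACY, FAM_22000, FAM_AX210 };

struct nmi_op { uint32_t reg, val; };

static struct nmi_op pick_nmi_op(enum family f)
{
        switch (f) {
        case FAM_LEGACY:
                return (struct nmi_op){ 0x1000, 1u << 7 };
        case FAM_22000:
                return (struct nmi_op){ 0x2000, 1u << 24 };
        default:                /* FAM_AX210 and newer */
                return (struct nmi_op){ 0x3000, 1u << 0 };
        }
}

int main(void)
{
        struct nmi_op op = pick_nmi_op(FAM_22000);

        printf("write 0x%08x -> reg 0x%08x\n",
               (unsigned)op.val, (unsigned)op.reg);
        return 0;
}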
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2018-2019 Intel Corporation + */ #ifndef __iwl_io_h__ #define __iwl_io_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index e8ce3a300857..004ebdac4535 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 6d19de3058d2..720193d16539 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/types.h> #include <linux/slab.h> #include <linux/export.h> @@ -107,9 +51,11 @@ enum ext_nvm_offsets { /* NVM SW-Section offset (in words) definitions */ NVM_VERSION_EXT_NVM = 0, + N_HW_ADDRS_FAMILY_8000 = 3, + + /* NVM PHY_SKU-Section offset (in words) definitions */ RADIO_CFG_FAMILY_EXT_NVM = 0, SKU_FAMILY_8000 = 2, - N_HW_ADDRS_FAMILY_8000 = 3, /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_EXTENDED = 0, @@ -166,6 +112,7 @@ static const u16 iwl_uhb_nvm_channels[] = { #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) #define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels) #define NUM_2GHZ_CHANNELS 14 +#define NUM_5GHZ_CHANNELS 37 #define FIRST_2GHZ_HT_MINUS 5 #define LAST_2GHZ_HT_PLUS 9 #define N_HW_ADDR_MASK 0xF @@ -389,6 +336,10 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx) { + if (ch_idx >= NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS) { + return NL80211_BAND_6GHZ; + } + if (ch_idx >= NUM_2GHZ_CHANNELS) return NL80211_BAND_5GHZ; return NL80211_BAND_2GHZ; @@ -480,6 +431,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, else channel->flags = 0; + /* TODO: Don't put limitations on UHB devices as we still don't + * have NVM for them + */ + if (cfg->uhb_supported) + channel->flags = 0; iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, channel->hw_value, ch_flags); IWL_DEBUG_EEPROM(dev, "Ch. 
%d: %ddBm\n", @@ -580,7 +536,7 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans, cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } -static struct ieee80211_sband_iftype_data iwl_he_capa[] = { +static const struct ieee80211_sband_iftype_data iwl_he_capa[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { @@ -743,12 +699,82 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { }, }; +static void iwl_init_he_6ghz_capa(struct iwl_trans *trans, + struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + u8 tx_chains, u8 rx_chains) +{ + struct ieee80211_sta_ht_cap ht_cap; + struct ieee80211_sta_vht_cap vht_cap = {}; + struct ieee80211_sband_iftype_data *iftype_data; + u16 he_6ghz_capa = 0; + u32 exp; + int i; + + if (sband->band != NL80211_BAND_6GHZ) + return; + + /* grab HT/VHT capabilities and calculate HE 6 GHz capabilities */ + iwl_init_ht_hw_capab(trans, data, &ht_cap, NL80211_BAND_5GHZ, + tx_chains, rx_chains); + WARN_ON(!ht_cap.ht_supported); + iwl_init_vht_hw_capab(trans, data, &vht_cap, tx_chains, rx_chains); + WARN_ON(!vht_cap.vht_supported); + + he_6ghz_capa |= + u16_encode_bits(ht_cap.ampdu_density, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + exp = u32_get_bits(vht_cap.cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + he_6ghz_capa |= + u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + exp = u32_get_bits(vht_cap.cap, IEEE80211_VHT_CAP_MAX_MPDU_MASK); + he_6ghz_capa |= + u16_encode_bits(exp, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); + /* we don't support extended_ht_cap_info anywhere, so no RD_RESPONDER */ + if (vht_cap.cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) + he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; + if (vht_cap.cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) + he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + + IWL_DEBUG_EEPROM(trans->dev, "he_6ghz_capa=0x%x\n", he_6ghz_capa); + + /* we know it's writable - we set it before ourselves */ + iftype_data = (void *)sband->iftype_data; + for (i = 0; i < sband->n_iftype_data; i++) + iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa); +} + static void iwl_init_he_hw_capab(struct iwl_trans *trans, struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, u8 tx_chains, u8 rx_chains) { - sband->iftype_data = iwl_he_capa; + struct ieee80211_sband_iftype_data *iftype_data; + + /* should only initialize once */ + if (WARN_ON(sband->iftype_data)) + return; + + BUILD_BUG_ON(sizeof(data->iftd.low) != sizeof(iwl_he_capa)); + BUILD_BUG_ON(sizeof(data->iftd.high) != sizeof(iwl_he_capa)); + + switch (sband->band) { + case NL80211_BAND_2GHZ: + iftype_data = data->iftd.low; + break; + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + iftype_data = data->iftd.high; + break; + default: + WARN_ON(1); + return; + } + + memcpy(iftype_data, iwl_he_capa, sizeof(iwl_he_capa)); + + sband->iftype_data = iftype_data; sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ @@ -756,14 +782,15 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans, int i; for (i = 0; i < sband->n_iftype_data; i++) { - iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &= + iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &= ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; - iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &= + iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &= ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; - iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &= + 
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; } } + iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains); } static void iwl_init_sbands(struct iwl_trans *trans, @@ -806,6 +833,19 @@ static void iwl_init_sbands(struct iwl_trans *trans, if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains); + /* 6GHz band. */ + sband = &data->bands[NL80211_BAND_6GHZ]; + sband->band = NL80211_BAND_6GHZ; + /* use the same rates as 5GHz band */ + sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; + sband->n_bitrates = N_RATES_52; + n_used += iwl_init_sband_channels(data, sband, n_channels, + NL80211_BAND_6GHZ); + + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains); + else + sband->n_channels = 0; if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 50bd7fdcf852..aca1ccdd1aa4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
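[iwl_init_he_6ghz_capa() in the iwl-nvm-parse.c hunks above builds the 16-bit HE 6 GHz capability word by extracting A-MPDU/MPDU limits from the HT/VHT caps with u32_get_bits() and re-packing them with u16_encode_bits(). A standalone sketch of that mask-based packing follows, with made-up masks rather than the real IEEE80211_HE_6GHZ_CAP_*/IEEE80211_VHT_CAP_* layouts:]

#include <stdint.h>
#include <stdio.h>

/* the lowest set bit of the mask gives the field's shift, so dividing
 * or multiplying by it moves the field out of or into position */
#define GET_BITS(v, m)    (((v) & (m)) / ((m) & -(m)))
#define ENCODE_BITS(v, m) (((v) * ((m) & -(m))) & (m))

#define VHT_MAX_AMPDU_EXP 0x03800000u   /* made-up: bits 25:23 */
#define HE6_MAX_AMPDU_EXP 0x0038u       /* made-up: bits 5:3 */

int main(void)
{
        uint32_t vht_cap = 0x03800000;  /* exponent field = 7 */
        uint32_t exp = GET_BITS(vht_cap, VHT_MAX_AMPDU_EXP);
        uint16_t he6 = 0;

        he6 |= (uint16_t)ENCODE_BITS(exp, HE6_MAX_AMPDU_EXP);
        printf("he_6ghz_capa=0x%04x\n", he6);   /* 0x0038 */
        return 0;
}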
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2015, 2018-2020 Intel Corporation + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_nvm_parse_h__ #define __iwl_nvm_parse_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index b35b8920941b..9097fe310693 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH + */ #ifndef __iwl_op_mode_h__ #define __iwl_op_mode_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c index ae83cfdb750e..5378315d0179 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2020 Intel Corporation + * Copyright (C) 2016 Intel Deutschland GmbH + */ #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> @@ -79,11 +25,11 @@ struct iwl_phy_db_entry { * * @cfg: phy configuration. * @calib_nch: non channel specific calibration data. - * @calib_ch: channel specific calibration data. * @n_group_papd: number of entries in papd channel group. * @calib_ch_group_papd: calibration data related to papd channel group. * @n_group_txp: number of entries in tx power channel group. * @calib_ch_group_txp: calibration data related to tx power chanel group. + * @trans: transport layer */ struct iwl_phy_db { struct iwl_phy_db_entry cfg; @@ -201,13 +147,23 @@ IWL_EXPORT_SYMBOL(iwl_phy_db_free); int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt) { + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_calib_res_notif_phy_db *phy_db_notif = (struct iwl_calib_res_notif_phy_db *)pkt->data; - enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type); - u16 size = le16_to_cpu(phy_db_notif->length); + enum iwl_phy_db_section_type type; + u16 size; struct iwl_phy_db_entry *entry; u16 chg_id = 0; + if (pkt_len < sizeof(*phy_db_notif)) + return -EINVAL; + + type = le16_to_cpu(phy_db_notif->type); + size = le16_to_cpu(phy_db_notif->length); + + if (pkt_len < sizeof(*phy_db_notif) + size) + return -EINVAL; + if (!phy_db) return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h index 7020dca05221..0b2fdfa94fcf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h @@ -1,61 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014 Intel Corporation + */ #ifndef __IWL_PHYDB_H__ #define __IWL_PHYDB_H__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index fa3f15778fc7..0b03fdedc1f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016 Intel Deutschland GmbH + */ #ifndef __iwl_prph_h__ #define __iwl_prph_h__ #include <linux/bitfield.h> @@ -109,7 +54,8 @@ #define DEVICE_SET_NMI_VAL_DRV BIT(7) /* Device NMI register and value for 9000 family and above hw's */ #define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 -#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000 +#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER BIT(24) +#define UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE (BIT(24) | BIT(25)) /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 @@ -406,10 +352,6 @@ enum { #define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890 #define CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR 0xA29938 -enum { - HW_STEP_LOCATION_BITS = 24, -}; - #define PREG_AUX_BUS_WPROT_0 0xA04CC0 /* device family 9000 WPROT register */ @@ -458,6 +400,7 @@ enum { #define UREG_DOORBELL_TO_ISR6 0xA05C04 #define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) +#define UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE (BIT(0) | BIT(1)) #define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18) #define UREG_DOORBELL_TO_ISR6_RESUME BIT(19) #define UREG_DOORBELL_TO_ISR6_PNVM BIT(20) @@ -473,4 +416,22 @@ enum { #define IWL_D3_SLEEP_STATUS_SUSPEND 0xD3 #define IWL_D3_SLEEP_STATUS_RESUME 0xD0 + +#define WMAL_INDRCT_RD_CMD1_OPMOD_POS 28 +#define WMAL_INDRCT_RD_CMD1_BYTE_ADDRESS_MSK 0xFFFFF +#define WMAL_CMD_READ_BURST_ACCESS 2 +#define WMAL_MRSPF_1 0xADFC20 +#define WMAL_INDRCT_RD_CMD1 0xADFD44 +#define WMAL_INDRCT_CMD1 0xADFC14 +#define WMAL_INDRCT_CMD(addr) \ + ((WMAL_CMD_READ_BURST_ACCESS << WMAL_INDRCT_RD_CMD1_OPMOD_POS) | \ + ((addr) & WMAL_INDRCT_RD_CMD1_BYTE_ADDRESS_MSK)) + +#define WFPM_LMAC1_PS_CTL_RW 0xA03380 +#define WFPM_LMAC2_PS_CTL_RW 0xA033C0 +#define WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK 0x0000000F +#define WFPM_PHYRF_STATE_ON 5 +#define HBUS_TIMEOUT 0xA5A5A5A1 +#define WFPM_DPHY_OFF 0xDF10FF + #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h index 9f11f3912816..1eaff898ca3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h @@ -1,61 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2014 Intel Mobile Communications GmbH + */ #ifndef __iwl_scd_h__ #define __iwl_scd_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index becee92a5fd6..cc76826da5d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + * Copyright (C) 2019-2020 Intel Corporation + */ #include <linux/kernel.h> #include <linux/bsearch.h> diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 11a040e75bf3..2d65bb82f7fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_trans_h__ #define __iwl_trans_h__ @@ -373,7 +316,7 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) case IWL_AMSDU_8K: return get_order(8 * 1024); case IWL_AMSDU_12K: - return get_order(12 * 1024); + return get_order(16 * 1024); default: WARN_ON(1); return -1; @@ -391,7 +334,7 @@ iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size) case IWL_AMSDU_8K: return 8 * 1024; case IWL_AMSDU_12K: - return 12 * 1024; + return 16 * 1024; default: WARN_ON(1); return 0; @@ -432,12 +375,12 @@ struct iwl_hcmd_arr { * @bc_table_dword: set to true if the BC table expects the byte count to be * in DWORD (as opposed to bytes) * @scd_set_active: should the transport configure the SCD for HCMD queue - * @sw_csum_tx: transport should compute the TCP checksum * @command_groups: array of command groups, each member is an array of the * commands in the group; for debugging only * @command_groups_size: number of command groups, to avoid illegal access * @cb_data_offs: offset inside skb->cb to store transport data at, must have * space for at least two pointers + * @fw_reset_handshake: firmware supports reset flow handshake */ struct iwl_trans_config { struct iwl_op_mode *op_mode; @@ -451,11 +394,11 @@ struct iwl_trans_config { enum iwl_amsdu_size rx_buf_size; bool bc_table_dword; bool scd_set_active; - bool sw_csum_tx; const struct iwl_hcmd_arr *command_groups; int command_groups_size; u8 cb_data_offs; + bool fw_reset_handshake; }; struct iwl_trans_dump_data { @@ -649,12 +592,14 @@ struct iwl_trans_ops { /** * 
enum iwl_trans_state - state of the transport layer * - * @IWL_TRANS_NO_FW: no fw has sent an alive response - * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response + * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed + * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet + * @IWL_TRANS_FW_ALIVE: FW has sent an alive response */ enum iwl_trans_state { - IWL_TRANS_NO_FW = 0, - IWL_TRANS_FW_ALIVE = 1, + IWL_TRANS_NO_FW, + IWL_TRANS_FW_STARTED, + IWL_TRANS_FW_ALIVE, }; /** @@ -1084,12 +1029,18 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { + int ret; + might_sleep(); WARN_ON_ONCE(!trans->rx_mpdu_cmd); clear_bit(STATUS_FW_ERROR, &trans->status); - return trans->ops->start_fw(trans, fw, run_in_rfkill); + ret = trans->ops->start_fw(trans, fw, run_in_rfkill); + if (ret == 0) + trans->state = IWL_TRANS_FW_STARTED; + + return ret; } static inline void iwl_trans_stop_device(struct iwl_trans *trans) @@ -1441,8 +1392,10 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) return; /* prevent double restarts due to the same erroneous FW */ - if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) + if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) { iwl_op_mode_nic_error(trans->op_mode); + trans->state = IWL_TRANS_NO_FW; + } } static inline bool iwl_trans_fw_running(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c index 5e731c57e4f7..0aac306304cb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2016 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2016 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2020 Intel Corporation + * Copyright (C) 2016 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 5ae22cd7ecdb..9b194cb8d65e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + */ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 2487871eac73..617b41ee5801 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2013-2014, 2018-2020 Intel Corporation + * Copyright (C) 2015 Intel Deutschland GmbH + */ #ifndef __MVM_CONSTANTS_H #define __MVM_CONSTANTS_H @@ -107,7 +50,6 @@ #define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 -#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 @@ -165,5 +107,6 @@ #define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016 #define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2 +#define IWL_MVM_DISABLE_AP_FILS false #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d21143495e70..c025188fa9bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/etherdevice.h> #include <linux/ip.h> #include <linux/fs.h> @@ -955,7 +899,6 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *nd_config, struct ieee80211_vif *vif) { - struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; int ret; bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); @@ -975,19 +918,6 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, return ret; } - /* rfkill release can be either for wowlan or netdetect */ - if (wowlan->rfkill_release) - wowlan_config_cmd.wakeup_filter |= - cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); - - wowlan_config_cmd.sta_id = mvm->aux_sta.sta_id; - - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - sizeof(wowlan_config_cmd), - &wowlan_config_cmd); - if (ret) - return ret; - ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies, IWL_MVM_SCAN_NETDETECT); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f043eefabb4e..573e46956c14 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include "mvm.h" #include "debugfs.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 3395c4675988..80f848a9ee13 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/vmalloc.h> #include <linux/ieee80211.h> #include <linux/netdevice.h> @@ -381,10 +326,10 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "Use geographic profile %d\n", tbl_idx); pos += scnprintf(buf + pos, bufsz - pos, - "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n", + "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", value[1], value[2], value[0]); pos += scnprintf(buf + pos, bufsz - pos, - "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n", + "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n", value[4], value[5], value[3]); } mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h index a83d252c0602..0711ab689c48 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h @@ -1,63 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + */ #define MVM_DEBUGFS_READ_FILE_OPS(name) \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index a0ce761d0c59..a4fd0bf9ba19 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -1,66 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * Copyright (C) 2019 Intel Corporation - * Copyright (C) 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * Copyright (C) 2019 Intel Corporation - * Copyright (C) 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include <linux/etherdevice.h> #include <linux/math64.h> #include <net/cfg80211.h> @@ -1035,9 +977,44 @@ iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, } } +static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm) +{ + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ)) + return 5; + + /* Starting from version 8, the FW advertises the version */ + if (mvm->cmd_ver.range_resp >= 8) + return mvm->cmd_ver.range_resp; + else if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) + return 7; + + /* The first version of the new range request API */ + return 6; +} + +static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) +{ + switch (ver) { + case 8: + return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); + case 7: + return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7); + case 6: + return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6); + case 5: + return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5); + default: + WARN_ONCE(1, "FTM: unsupported range response version %u", ver); + return false; + } +} + void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; @@ -1046,6 +1023,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); u8 num_of_aps, last_in_batch; + u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm); lockdep_assert_held(&mvm->mutex); @@ -1053,6 +1031,9 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) return; } + if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len))) + return; + if (new_api) { if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id, fw_resp_v8->num_of_aps)) @@ -1079,11 +1060,10 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) int peer_idx; if (new_api) { - if (mvm->cmd_ver.range_resp == 8) { + if (notif_ver == 8) { fw_ap = &fw_resp_v8->ap[i]; 
iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap); - } else if (fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) { + } else if (notif_ver == 7) { fw_ap = (void *)&fw_resp_v7->ap[i]; } else { fw_ap = (void *)&fw_resp_v6->ap[i]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index dd3662b9a5bc..996f45c19f10 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
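[Editor's sketch] The iwl_mvm_ftm_get_range_resp_ver()/iwl_mvm_ftm_resp_size_validation() pair added above shows a general pattern for versioned firmware notifications: resolve the effective version first, then refuse any payload whose length does not match the struct for that version before casting pkt->data. A minimal standalone sketch of that pattern, with hypothetical struct layouts and names (not the driver's real types):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-version notification layouts. */
struct resp_v5 { unsigned char data[40]; };
struct resp_v6 { unsigned char data[48]; };

/* Accept a payload only if its length matches the layout of the
 * negotiated version exactly; only then is casting the raw buffer
 * to the versioned struct safe. */
static bool resp_size_valid(unsigned int version, size_t len)
{
	switch (version) {
	case 5:
		return len == sizeof(struct resp_v5);
	case 6:
		return len == sizeof(struct resp_v6);
	default:
		return false;	/* unknown version: never cast blindly */
	}
}

int main(void)
{
	printf("%d\n", resp_size_valid(5, sizeof(struct resp_v5)));	/* 1 */
	printf("%d\n", resp_size_valid(6, 8));				/* 0 */
	return 0;
}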
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2015-2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include <net/cfg80211.h> #include <linux/etherdevice.h> #include "mvm.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 05a06f88db6c..d7ca1f98883b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __fw_api_h__ #define __fw_api_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 6385b9641126..0637eb1cff4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include <linux/netdevice.h> @@ -215,6 +160,7 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); struct iwl_mvm *mvm = container_of(notif_wait, struct iwl_mvm, notif_wait); struct iwl_mvm_alive_data *alive_data = data; @@ -232,6 +178,9 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, UCODE_ALIVE_NTFY, 0) == 5) { struct iwl_alive_ntf_v5 *palive; + if (pkt_len < sizeof(*palive)) + return false; + palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; @@ -249,6 +198,9 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) { struct iwl_alive_ntf_v4 *palive; + if (pkt_len < sizeof(*palive)) + return false; + palive = (void *)pkt->data; umac = &palive->umac_data; lmac1 = &palive->lmac_data[0]; @@ -258,6 +210,9 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, sizeof(struct iwl_alive_ntf_v3)) { struct iwl_alive_ntf_v3 *palive3; + if (pkt_len < sizeof(*palive3)) + return false; + palive3 = (void *)pkt->data; umac = &palive3->umac_data; lmac1 = &palive3->lmac_data; @@ -457,10 +412,18 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, iwl_fw_set_dbg_rec_on(&mvm->fwrt); #endif + /* + * All the BSSes in the BSS table include the GP2 in the system + * at the beacon Rx time, this is of course no longer relevant + * since we are resetting the firmware. + * Purge all the BSS table. 
+ */ + cfg80211_bss_flush(mvm->hw->wiphy); + return 0; } -static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait init_wait; struct iwl_nvm_access_complete_cmd nvm_complete = {}; @@ -517,7 +480,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) iwl_mvm_load_nvm_to_nic(mvm); } - if (IWL_MVM_PARSE_NVM && read_nvm) { + if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); @@ -542,7 +505,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) return ret; /* Read the NVM only at driver load time, no need to do this twice */ - if (!IWL_MVM_PARSE_NVM && read_nvm) { + if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); @@ -647,7 +610,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) cmd_size, &phy_cfg_cmd); } -int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm) { struct iwl_notification_wait calib_wait; static const u16 init_complete[] = { @@ -657,7 +620,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) int ret; if (iwl_mvm_has_unified_ucode(mvm)) - return iwl_run_unified_mvm_ucode(mvm, true); + return iwl_run_unified_mvm_ucode(mvm); lockdep_assert_held(&mvm->mutex); @@ -684,7 +647,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) } /* Read the NVM only at driver load time, no need to do this twice */ - if (read_nvm) { + if (!mvm->nvm_data) { ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); @@ -1096,7 +1059,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) struct iwl_tas_config_cmd cmd = {}; int list_size; - BUILD_BUG_ON(ARRAY_SIZE(cmd.black_list_array) < + BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) < APCI_WTAS_BLACK_LIST_MAX); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { @@ -1104,7 +1067,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; } - ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.black_list_array, &list_size); + ret = iwl_acpi_get_tas(&mvm->fwrt, cmd.block_list_array, &list_size); if (ret < 0) { IWL_DEBUG_RADIO(mvm, "TAS table invalid or unavailable. (%d)\n", @@ -1116,7 +1079,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) return; /* list size if TAS enabled can only be non-negative */ - cmd.black_list_size = cpu_to_le32((u32)list_size); + cmd.block_list_size = cpu_to_le32((u32)list_size); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG), @@ -1330,9 +1293,10 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) int ret; if (iwl_mvm_has_unified_ucode(mvm)) - return iwl_run_unified_mvm_ucode(mvm, false); + return iwl_run_unified_mvm_ucode(mvm); - ret = iwl_run_init_mvm_ucode(mvm, false); + WARN_ON(!mvm->nvm_data); + ret = iwl_run_init_mvm_ucode(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 72c4b2b8399d..1ea7c44250d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -1,65 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
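[Editor's sketch] The fw.c hunks above drop the read_nvm boolean and instead key "read the NVM only at driver load time" off mvm->nvm_data being NULL, so the cached pointer itself records whether the one-time read already happened. A toy illustration of that lazy read-once idiom, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct nvm_data { int channels; };
struct dev { struct nvm_data *nvm_data; };

/* Hypothetical one-time loader: the cached pointer doubles as the
 * "already read" flag, so callers no longer pass a read_nvm bool. */
static int dev_nvm_init(struct dev *d)
{
	if (d->nvm_data)		/* already read at load time */
		return 0;
	d->nvm_data = malloc(sizeof(*d->nvm_data));
	if (!d->nvm_data)
		return -1;
	d->nvm_data->channels = 14;	/* stand-in for the real parse */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	dev_nvm_init(&d);		/* performs the read */
	dev_nvm_init(&d);		/* no-op on restart paths */
	printf("%d\n", d.nvm_data->channels);
	free(d.nvm_data);
	return 0;
}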
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2017 Intel Deutschland GmbH + */ #include <linux/leds.h> #include "iwl-io.h" #include "iwl-csr.h" @@ -115,7 +58,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) switch (mode) { case IWL_LED_BLINK: IWL_ERR(mvm, "Blink led mode not supported, used default\n"); - /* fall through */ + fallthrough; case IWL_LED_DEFAULT: case IWL_LED_RF_STATE: mode = IWL_LED_RF_STATE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index cbdebefb854a..9078fcb5286c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1,66 +1,11 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <linux/etherdevice.h> +#include <linux/crc32.h> #include <net/mac80211.h> #include "iwl-io.h" #include "iwl-prph.h" @@ -115,12 +60,12 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, * client in the system. * * The firmware will decide according to the MAC type which - * will be the master and slave. Clients that need to sync - * with a remote station will be the master, and an AP or GO - * will be the slave. + * will be the leader and follower. Clients that need to sync + * with a remote station will be the leader, and an AP or GO + * will be the follower. * - * Depending on the new interface type it can be slaved to - * or become the master of an existing interface. + * Depending on the new interface type it can be following + * or become the leader of an existing interface. */ switch (data->vif->type) { case NL80211_IFTYPE_STATION: @@ -287,7 +232,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) case NL80211_IFTYPE_STATION: if (!vif->p2p) break; - /* fall through */ + fallthrough; default: __clear_bit(0, data.available_mac_ids); } @@ -980,12 +925,28 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct iwl_mac_beacon_cmd beacon_cmd = {}; u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); u16 flags; + struct ieee80211_chanctx_conf *ctx; + int channel; flags = iwl_mvm_mac80211_idx_to_hwrate(rate); if (rate == IWL_FIRST_CCK_RATE) flags |= IWL_MAC_BEACON_CCK; + /* Enable FILS on PSC channels only */ + rcu_read_lock(); + ctx = rcu_dereference(vif->chanctx_conf); + channel = ieee80211_frequency_to_channel(ctx->def.chan->center_freq); + WARN_ON(channel == 0); + if (cfg80211_channel_is_psc(ctx->def.chan) && + !IWL_MVM_DISABLE_AP_FILS) { + flags |= IWL_MAC_BEACON_FILS; + beacon_cmd.short_ssid = + cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid, + vif->bss_conf.ssid_len)); + } + rcu_read_unlock(); + beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); @@ -1202,13 +1163,11 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, force_assoc_off, bssid_override); - break; case NL80211_IFTYPE_AP: if (!vif->p2p) return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action); else return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action); - break; case NL80211_IFTYPE_MONITOR: return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action); case NL80211_IFTYPE_P2P_DEVICE: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b627e7da7ac9..da32937ba9a7 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
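[Editor's sketch] The FILS beacon hunk in mac-ctxt.c above fills beacon_cmd.short_ssid with ~crc32_le(~0, ssid, ssid_len), which is the standard CRC-32 of the SSID — the value 802.11ai defines as the short SSID. A self-contained sketch of the same computation (the SSID string is made up; the bitwise loop stands in for the kernel's crc32_le):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-32 (LSB-first, polynomial 0xEDB88320), seed ~0 plus a
 * final inversion: the same value the driver obtains from
 * ~crc32_le(~0, ssid, ssid_len) when building the FILS short SSID. */
static uint32_t crc32_std(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	while (len--) {
		crc ^= *p++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const char *ssid = "example-ap";	/* hypothetical SSID */

	printf("short_ssid = 0x%08x\n",
	       crc32_std((const uint8_t *)ssid, strlen(ssid)));
	return 0;
}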
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/skbuff.h> @@ -566,6 +511,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; hw->wiphy->n_iface_combinations = @@ -619,6 +565,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && + mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) + hw->wiphy->bands[NL80211_BAND_6GHZ] = + &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; hw->wiphy->hw_version = mvm->trans->hw_id; @@ -748,13 +699,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) } hw->netdev_features |= mvm->cfg->features; - if (!iwl_mvm_is_csum_supported(mvm)) { + if (!iwl_mvm_is_csum_supported(mvm)) hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM); - /* We may support SW TX CSUM */ - if (IWL_MVM_SW_TX_CSUM_OFFLOAD) - hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; - } if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, @@ -1353,12 +1300,6 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); - if (mvmvif->csa_failed) { - mvmvif->csa_failed = false; - ret = -EIO; - goto out_unlock; - } - if (vif->type == NL80211_IFTYPE_STATION) { struct iwl_mvm_sta *mvmsta; @@ -1390,6 +1331,8 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, ret = iwl_mvm_power_update_ps(mvm); out_unlock: + if (mvmvif->csa_failed) + ret = -EIO; mutex_unlock(&mvm->mutex); return ret; @@ -1417,9 +1360,10 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); + mvmvif->csa_failed = true; mutex_unlock(&mvm->mutex); - WARN_ON(iwl_mvm_post_channel_switch(hw, vif)); + iwl_mvm_post_channel_switch(hw, vif); } static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) @@ -2279,9 +2223,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, int ret; /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the station interface was - * added. + * Re-calculate the tsf id, as the leader-follower relations depend + * on the beacon interval, which was not known when the station + * interface was added. */ if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { if (vif->bss_conf.he_support && @@ -2499,8 +2443,9 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, goto out_unlock; /* - * Re-calculate the tsf id, as the master-slave relations depend on the - * beacon interval, which was not known when the AP interface was added. + * Re-calculate the tsf id, as the leader-follower relations depend on + * the beacon interval, which was not known when the AP interface + * was added. */ if (vif->type == NL80211_IFTYPE_AP) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); @@ -3116,7 +3061,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * than 16. 
We can't avoid connecting at all, so refuse the * station state change, this will cause mac80211 to abandon * attempts to connect to this AP, and eventually wpa_s will - * blacklist the AP... + * blocklist the AP... */ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.beacon_int < 16) { @@ -4102,7 +4047,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = true; break; } - /* fall through */ + fallthrough; case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow @@ -4603,6 +4548,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, break; case NL80211_IFTYPE_STATION: + if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) + schedule_delayed_work(&mvmvif->csa_work, 0); + if (chsw->block_tx) { /* * In case of undetermined / long time with immediate @@ -4676,12 +4624,17 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, } mvmvif->csa_count = chsw->count; - IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id); + mutex_lock(&mvm->mutex); + if (mvmvif->csa_failed) + goto out_unlock; + IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id); WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), - CMD_ASYNC, sizeof(cmd), &cmd)); + 0, sizeof(cmd), &cmd)); +out_unlock: + mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) @@ -5097,12 +5050,10 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (notif->sync) { notif->cookie = mvm->queue_sync_cookie; - atomic_set(&mvm->queue_sync_counter, - mvm->trans->num_rx_queues); + mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; } - ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, - size, !notif->sync); + ret = iwl_mvm_notify_rx_queue(mvm, qmask, notif, size, !notif->sync); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); goto out; @@ -5111,14 +5062,16 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, if (notif->sync) { lockdep_assert_held(&mvm->mutex); ret = wait_event_timeout(mvm->rx_sync_waitq, - atomic_read(&mvm->queue_sync_counter) == 0 || + READ_ONCE(mvm->queue_sync_state) == 0 || iwl_mvm_is_radio_killed(mvm), HZ); - WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm)); + WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), + "queue sync: failed to sync, state is 0x%lx\n", + mvm->queue_sync_state); } out: - atomic_set(&mvm->queue_sync_counter, 0); + mvm->queue_sync_state = 0; if (notif->sync) mvm->queue_sync_cookie++; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 7159d1da3e77..ed0e8b751737 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
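[Editor's sketch] Replacing the atomic countdown with the queue_sync_state bitmask above means a timeout can report exactly which RX queues never answered, not merely how many. A single-threaded toy of that bookkeeping, with invented names (the real driver clears bits from the per-queue RX path and sleeps in wait_event_timeout() with READ_ONCE()):

#include <stdio.h>

/* One bit per RX queue: all bits set when the sync is sent, each
 * queue clears its own bit on completion, and the waiter is done
 * when the mask reaches zero. */
static unsigned long queue_sync_state;

static void sync_start(int num_rx_queues)
{
	queue_sync_state = (1UL << num_rx_queues) - 1;
}

static void sync_queue_done(int queue)
{
	queue_sync_state &= ~(1UL << queue);
}

int main(void)
{
	sync_start(4);			/* queues 0..3 outstanding: 0xf */
	sync_queue_done(1);
	sync_queue_done(3);
	/* a stuck queue is now visible in the mask, unlike a counter */
	printf("still pending: 0x%lx\n", queue_sync_state);	/* 0x5 */
	return 0;
}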
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __IWL_MVM_H__ #define __IWL_MVM_H__ @@ -842,7 +786,7 @@ struct iwl_mvm { unsigned long status; u32 queue_sync_cookie; - atomic_t queue_sync_counter; + unsigned long queue_sync_state; /* * for beacon filtering - * currently only one interface can be supported @@ -1481,7 +1425,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm); * MVM Methods ******************/ /* uCode */ -int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm); +int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm); /* Utils */ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags, @@ -1615,7 +1559,8 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const u8 *data, u32 count, bool async); + const struct iwl_mvm_internal_rxq_notif *notif, + u32 notif_size, bool async); void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -1945,16 +1890,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } -static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) -{ - lockdep_assert_held(&mvm->mutex); - iwl_fw_cancel_timestamp(&mvm->fwrt); - clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - iwl_fw_dbg_stop_sync(&mvm->fwrt); - iwl_trans_stop_device(mvm->trans); - iwl_free_fw_paging(&mvm->fwrt); - iwl_fw_dump_conf_clear(&mvm->fwrt); -} +void iwl_mvm_stop_device(struct iwl_mvm *mvm); /* Re-configure the SCD for a queue that has already been configured */ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, @@ -2100,6 +2036,8 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band) return PHY_BAND_24; case NL80211_BAND_5GHZ: return PHY_BAND_5; + case NL80211_BAND_6GHZ: + return PHY_BAND_6; default: WARN_ONCE(1, "Unsupported band (%u)\n", band); return PHY_BAND_5; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 60296a754af2..abb8c1088c2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
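[Editor's sketch] The mvm.h hunk above extends the nl80211-to-firmware band mapping with PHY_BAND_6, with the same WARN-and-fall-back behavior for anything unrecognized. A compilable mirror of that mapping logic — enum values here are made up for illustration, not the firmware's real encoding:

#include <stdio.h>

enum nl_band  { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };
enum phy_band { PHY_BAND_5 = 0, PHY_BAND_24 = 1, PHY_BAND_6 = 2 };

static enum phy_band phy_band_from_nl(enum nl_band band)
{
	switch (band) {
	case BAND_2GHZ:
		return PHY_BAND_24;
	case BAND_6GHZ:
		return PHY_BAND_6;
	case BAND_5GHZ:
		return PHY_BAND_5;
	default:
		/* the real code WARNs on unknown bands, then falls back */
		return PHY_BAND_5;
	}
}

int main(void)
{
	printf("%d\n", phy_band_from_nl(BAND_6GHZ));	/* 2 */
	return 0;
}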
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/firmware.h> #include <linux/rtnetlink.h> #include "iwl-trans.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index 6d71e05626ad..1cc90e61367b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 Intel Deutschland GmbH + */ #include <net/ipv6.h> #include <net/addrconf.h> #include <linux/bitops.h> diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f1c5b3a9c26f..98f62d78cf9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/module.h> #include <linux/vmalloc.h> #include <net/mac80211.h> @@ -315,6 +260,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF, iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, + iwl_mvm_probe_resp_data_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF, + iwl_mvm_channel_switch_noa_notif, + RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -703,7 +654,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, init_waitqueue_head(&mvm->rx_sync_waitq); - atomic_set(&mvm->queue_sync_counter, 0); + mvm->queue_sync_state = 0; SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); @@ -775,8 +726,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, driver_data[2]); - trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD; - /* Set a short watchdog for the command queue */ trans_cfg.cmd_q_wdg_timeout = iwl_mvm_get_wd_timeout(mvm, NULL, false, true); @@ -785,6 +734,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, sizeof(mvm->hw->wiphy->fw_version), "%s", fw->fw_version); + trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE); + /* Configure transport layer */ iwl_trans_configure(mvm->trans, &trans_cfg); @@ -822,7 +774,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; mutex_lock(&mvm->mutex); - err = iwl_run_init_mvm_ucode(mvm, true); + err = iwl_run_init_mvm_ucode(mvm); if (err && err != -ERFKILL) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!iwlmvm_mod_params.init_dbg || !err) @@ -876,6 +828,20 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } +void iwl_mvm_stop_device(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + iwl_fw_cancel_timestamp(&mvm->fwrt); + + clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + + iwl_fw_dbg_stop_sync(&mvm->fwrt); + iwl_trans_stop_device(mvm->trans); + iwl_free_fw_paging(&mvm->fwrt); + iwl_fw_dump_conf_clear(&mvm->fwrt); +} + static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); @@ -1298,6 +1264,12 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { + /* This should be first thing before trying to collect any + * data to avoid endless loops if any HW error happens while + * collecting debug data. 
+ */ + set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); + if (mvm->fw->ucode_capa.error_log_size) { u32 src_size = mvm->fw->ucode_capa.error_log_size; u32 src_addr = mvm->fw->ucode_capa.error_log_addr; @@ -1316,7 +1288,6 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; - set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); ieee80211_restart_hw(mvm->hw); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index bf2fc44dcb8d..0fd51f6aa206 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" @@ -109,7 +52,7 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) return PHY_VHT_CTRL_POS_4_ABOVE; default: WARN(1, "Invalid channel definition"); - /* fall through */ + fallthrough; case 0: /* * The FW is expected to check the control channel position only diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index c146303ec73b..f2b090be3898 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
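[Editor's sketch] Several hunks in this series (led.c, mac-ctxt.c, phy-ctxt.c above) replace /* fall through */ comments with the fallthrough pseudo-keyword, so -Wimplicit-fallthrough can verify the intent instead of trusting a comment. A minimal standalone sketch using the same attribute the kernel macro expands to (the surrounding function is ours):

#include <stdio.h>

/* The kernel's "fallthrough" expands to a compiler attribute; outside
 * the kernel the equivalent can be sketched like this. */
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)	/* portable no-op stand-in */
#endif

static int classify(int mode)
{
	switch (mode) {
	case 0:
		printf("mode 0 also behaves like mode 1\n");
		fallthrough;	/* deliberate: checked by the compiler */
	case 1:
		return 1;
	default:
		return -1;
	}
}

int main(void)
{
	return classify(0) == 1 ? 0 : 1;
}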
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 5e62b97af48b..3d0166df2002 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include "fw-api.h" #include "mvm.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index e0e80906fdc6..490a561c71db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,62 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include "rs.h" #include "fw-api.h" #include "sta.h" @@ -420,9 +366,21 @@ out: u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + if (mvmsta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { + switch (le16_get_bits(sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN)) { + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: + return IEEE80211_MAX_MPDU_LEN_VHT_11454; + case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991: + return IEEE80211_MAX_MPDU_LEN_VHT_7991; + default: + return IEEE80211_MAX_MPDU_LEN_VHT_3895; + } + } else if (vht_cap->vht_supported) { switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) { case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index ed7382e7ea17..91b6541d579f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -386,7 +386,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, const struct rs_rate *initial_rate); static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); -/** +/* * The following tables contain the expected throughput metrics for all rates * * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits @@ -396,7 +396,6 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search); * CCK rates are only valid in legacy table and will only be used in G * (2.4 GHz) band. 
*/ - static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = { 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0 }; @@ -670,7 +669,7 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) return 0; } -/** +/* * rs_collect_tx_data - Update the success/failure sliding window * * We keep a sliding window of the last 62 packets transmitted @@ -2667,7 +2666,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, } } -/** +/* * rs_initialize_lq - Initialize a station's hardware rate table * * The uCode's station table contains a table of fallback rates @@ -3756,7 +3755,7 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) } #ifdef CONFIG_MAC80211_DEBUGFS -/** +/* * Program the device to use fixed rate for frame transmit * This is for debugging/testing only * once the device start use fixed rate, we need to reload the module @@ -4211,6 +4210,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, /** * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection + * @mvm: The mvm component * @mvmsta: The station * @enable: Enable Tx protection? */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 0059c83c2783..f0364add85f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <asm/unaligned.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> @@ -226,7 +170,7 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; - /* fall through */ + fallthrough; case RX_MPDU_RES_STATUS_SEC_WEP_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) @@ -349,15 +293,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_mpdu_res_start *rx_res; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; - u32 len; + u32 len, pkt_len = iwl_rx_packet_payload_len(pkt); u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; + if (unlikely(pkt_len < sizeof(*rx_res))) { + IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n"); + return; + } + phy_info = &mvm->last_phy_info; rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data; hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res)); len = le16_to_cpu(rx_res->byte_count); + + if (unlikely(len + sizeof(*rx_res) + sizeof(__le32) > pkt_len)) { + IWL_DEBUG_DROP(mvm, "FW lied about packet len\n"); + return; + } + rx_pkt_status = get_unaligned_le32((__le32 *) (pkt->data + sizeof(*rx_res) + len)); @@ -689,30 +644,20 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } -static void iwl_mvm_update_avg_energy(struct iwl_mvm *mvm, - u8 energy[IWL_MVM_STATION_COUNT_MAX]) +static void iwl_mvm_stats_energy_iter(void *_data, + struct ieee80211_sta *sta) { - int i; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + u8 *energy = _data; + u32 sta_id = mvmsta->sta_id; - if (WARN_ONCE(mvm->fw->ucode_capa.num_stations > - IWL_MVM_STATION_COUNT_MAX, - "Driver and FW station count mismatch %d\n", - mvm->fw->ucode_capa.num_stations)) + if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT_MAX, "sta_id %d >= %d", + sta_id, IWL_MVM_STATION_COUNT_MAX)) return; - rcu_read_lock(); - for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { - struct iwl_mvm_sta *sta; + if (energy[sta_id]) + mvmsta->avg_energy = energy[sta_id]; - if (!energy[i]) - continue; - - sta = iwl_mvm_sta_from_staid_rcu(mvm, i); - if (!sta) - continue; - sta->avg_energy = 
energy[i]; - } - rcu_read_unlock(); } static void @@ -762,7 +707,8 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, stats = (void *)&pkt->data; if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL || - stats->hdr.version != 1, + stats->hdr.version != + iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, STATISTICS_CMD, 0), "received unsupported hdr type %d, version %d\n", stats->hdr.type, stats->hdr.version)) return; @@ -793,8 +739,8 @@ iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm, for (i = 0; i < ARRAY_SIZE(average_energy); i++) average_energy[i] = le32_to_cpu(stats->average_energy[i]); - iwl_mvm_update_avg_energy(mvm, average_energy); - + ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, + average_energy); /* * Don't update in case the statistics are not cleared, since * we will end up counting twice the same airtime, once in TCM @@ -904,8 +850,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, bytes = (void *)&stats->load_stats.byte_count; air_time = (void *)&stats->load_stats.air_time; } - - iwl_mvm_update_avg_energy(mvm, energy); + ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter, + energy); /* * Don't update in case the statistics are not cleared, since diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 838734fec502..4dc7c65a1130 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <linux/etherdevice.h> #include <linux/skbuff.h> #include "iwl-trans.h" @@ -379,7 +323,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, stats->flag |= RX_FLAG_MMIC_ERROR; *crypt_len = IEEE80211_TKIP_IV_LEN; - /* fall through */ + fallthrough; case IWL_RX_MPDU_STATUS_SEC_WEP: if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) return -1; @@ -510,26 +454,27 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, } int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, - const u8 *data, u32 count, bool async) + const struct iwl_mvm_internal_rxq_notif *notif, + u32 notif_size, bool async) { u8 buf[sizeof(struct iwl_rxq_sync_cmd) + sizeof(struct iwl_mvm_rss_sync_notif)]; struct iwl_rxq_sync_cmd *cmd = (void *)buf; - u32 data_size = sizeof(*cmd) + count; + u32 data_size = sizeof(*cmd) + notif_size; int ret; /* * size must be a multiple of DWORD * Ensure we don't overflow buf */ - if (WARN_ON(count & 3 || - count > sizeof(struct iwl_mvm_rss_sync_notif))) + if (WARN_ON(notif_size & 3 || + notif_size > sizeof(struct iwl_mvm_rss_sync_notif))) return -EINVAL; cmd->rxq_mask = cpu_to_le32(rxq_mask); - cmd->count = cpu_to_le32(count); + cmd->count = cpu_to_le32(notif_size); cmd->flags = 0; - memcpy(cmd->payload, data, count); + memcpy(cmd->payload, notif, notif_size); ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(DATA_PATH_GROUP, @@ -802,10 +747,18 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rxq_sync_notification *notif; struct iwl_mvm_internal_rxq_notif *internal_notif; + u32 len = iwl_rx_packet_payload_len(pkt); notif = (void *)pkt->data; internal_notif = (void *)notif->payload; + if (WARN_ONCE(len < sizeof(*notif) + sizeof(*internal_notif), + "invalid notification size %d (%d)", + len, (int)(sizeof(*notif) + sizeof(*internal_notif)))) + return; + /* remove only the firmware header, we want all of our payload below */ + len -= sizeof(*notif); + if (internal_notif->sync && mvm->queue_sync_cookie != internal_notif->cookie) { WARN_ONCE(1, "Received expired RX queue sync message\n"); @@ -814,11 +767,22 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, switch 
(internal_notif->type) { case IWL_MVM_RXQ_EMPTY: + WARN_ONCE(len != sizeof(*internal_notif), + "invalid empty notification size %d (%d)", + len, (int)sizeof(*internal_notif)); break; case IWL_MVM_RXQ_NOTIF_DEL_BA: + if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + "invalid delba notification size %d (%d)", + len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + break; iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data); break; case IWL_MVM_RXQ_NSSN_SYNC: + if (WARN_ONCE(len != sizeof(struct iwl_mvm_rss_sync_notif), + "invalid nssn sync notification size %d (%d)", + len, (int)sizeof(struct iwl_mvm_rss_sync_notif))) + break; iwl_mvm_nssn_sync(mvm, napi, queue, (void *)internal_notif->data); break; @@ -826,9 +790,13 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi, WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } - if (internal_notif->sync && - !atomic_dec_return(&mvm->queue_sync_counter)) - wake_up(&mvm->rx_sync_waitq); + if (internal_notif->sync) { + WARN_ONCE(!test_and_clear_bit(queue, &mvm->queue_sync_state), + "queue sync: queue %d responded a second time!\n", + queue); + if (READ_ONCE(mvm->queue_sync_state) == 0) + wake_up(&mvm->rx_sync_waitq); + } } static void iwl_mvm_oldsn_workaround(struct iwl_mvm *mvm, @@ -1314,7 +1282,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); - /* fall through */ + fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_SU: case IWL_RX_PHY_INFO_TYPE_HE_MU: case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: @@ -1387,7 +1355,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); - /* fall through */ + fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= le16_encode_bits(le32_get_bits(phy_data->d1, @@ -1397,7 +1365,7 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, le16_encode_bits(le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); - /* fall through */ + fallthrough; case IWL_RX_PHY_INFO_TYPE_HE_TB: case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, @@ -1591,6 +1559,8 @@ static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band) return NL80211_BAND_2GHZ; case PHY_BAND_5: return NL80211_BAND_5GHZ; + case PHY_BAND_6: + return NL80211_BAND_6GHZ; default: WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); return NL80211_BAND_5GHZ; @@ -1604,15 +1574,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; struct ieee80211_hdr *hdr; - u32 len = le16_to_cpu(desc->mpdu_len); + u32 len; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); u32 rate_n_flags, gp2_on_air_rise; - u16 phy_info = le16_to_cpu(desc->phy_info); + u16 phy_info; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0, channel, energy_a, energy_b; size_t desc_size; struct iwl_mvm_rx_phy_data phy_data = { - .d4 = desc->phy_data4, .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; bool csi = false; @@ -1620,13 +1590,22 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; + if 
(mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + desc_size = sizeof(*desc); + else + desc_size = IWL_RX_DESC_SIZE_V1; + + if (unlikely(pkt_len < desc_size)) { + IWL_DEBUG_DROP(mvm, "Bad REPLY_RX_MPDU_CMD size\n"); + return; + } + if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); channel = desc->v3.channel; gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); energy_a = desc->v3.energy_a; energy_b = desc->v3.energy_b; - desc_size = sizeof(*desc); phy_data.d0 = desc->v3.phy_data0; phy_data.d1 = desc->v3.phy_data1; @@ -1638,7 +1617,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); energy_a = desc->v1.energy_a; energy_b = desc->v1.energy_b; - desc_size = IWL_RX_DESC_SIZE_V1; phy_data.d0 = desc->v1.phy_data0; phy_data.d1 = desc->v1.phy_data1; @@ -1646,6 +1624,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, phy_data.d3 = desc->v1.phy_data3; } + len = le16_to_cpu(desc->mpdu_len); + + if (unlikely(len + desc_size > pkt_len)) { + IWL_DEBUG_DROP(mvm, "FW lied about packet len\n"); + return; + } + + phy_info = le16_to_cpu(desc->phy_info); + phy_data.d4 = desc->phy_data4; + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) phy_data.info_type = le32_get_bits(phy_data.d1, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 875281cf7fc0..97d2de8f1582 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1,69 +1,12 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/etherdevice.h> #include <net/mac80211.h> +#include <linux/crc32.h> #include "mvm.h" #include "fw/api/scan.h" @@ -148,6 +91,9 @@ struct iwl_mvm_scan_params { int n_scan_plans; struct cfg80211_sched_scan_plan *scan_plans; bool iter_notif; + struct cfg80211_scan_6ghz_params *scan_6ghz_params; + u32 n_6ghz_params; + bool scan_6ghz; }; static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) @@ -568,7 +514,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, { struct iwl_scan_offload_profile *profile; struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1; - struct iwl_scan_offload_blacklist *blacklist; + struct iwl_scan_offload_blocklist *blocklist; struct iwl_scan_offload_profile_cfg_data *data; int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw); int profile_cfg_size = sizeof(*data) + @@ -579,7 +525,7 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, .dataflags[0] = IWL_HCMD_DFL_NOCOPY, .dataflags[1] = IWL_HCMD_DFL_NOCOPY, }; - int blacklist_len; + int blocklist_len; int i; int ret; @@ -587,22 +533,22 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, return -EIO; if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL) - blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; + blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN; else - blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; + blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN; - blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL); - if (!blacklist) + blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL); + if (!blocklist) return -ENOMEM; profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL); if (!profile_cfg_v1) { ret = -ENOMEM; - goto free_blacklist; + goto free_blocklist; } - cmd.data[0] = blacklist; - cmd.len[0] = sizeof(*blacklist) * blacklist_len; + cmd.data[0] = blocklist; + cmd.len[0] = sizeof(*blocklist) * blocklist_len; cmd.data[1] = profile_cfg_v1; /* if max_profile is MAX_PROFILES_V2, we have the new API */ @@ -615,7 +561,7 @@ 
iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, data = &profile_cfg_v1->data; } - /* No blacklist configuration */ + /* No blocklist configuration */ data->num_profiles = req->n_match_sets; data->active_clients = SCAN_CLIENT_SCHED_SCAN; data->pass_match = SCAN_CLIENT_SCHED_SCAN; @@ -639,8 +585,8 @@ iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd(mvm, &cmd); kfree(profile_cfg_v1); -free_blacklist: - kfree(blacklist); +free_blocklist: + kfree(blocklist); return ret; } @@ -844,6 +790,12 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); pos += ies->len[NL80211_BAND_5GHZ]; + memcpy(pos, ies->ies[NL80211_BAND_6GHZ], + ies->len[NL80211_BAND_6GHZ]); + params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[2].len = + cpu_to_le16(ies->len[NL80211_BAND_6GHZ]); + pos += ies->len[NL80211_BAND_6GHZ]; memcpy(pos, ies->common_ies, ies->common_ie_len); params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); @@ -1516,6 +1468,14 @@ static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = { .channel_spacing_shift = 2, .band = PHY_BAND_5 }, + { + .start_idx = 51, + .end_idx = 111, + .first_channel_id = 1, + .last_channel_id = 241, + .channel_spacing_shift = 2, + .band = PHY_BAND_6 + }, }; static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band) @@ -1687,11 +1647,210 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm, cfg->flags = cpu_to_le32(flags | n_aps_flag); cfg->v2.channel_num = channels[i]->hw_value; cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band); + if (cfg80211_channel_is_psc(channels[i])) + cfg->flags = 0; cfg->v2.iter_count = 1; cfg->v2.iter_interval = 0; } } +static int +iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params, + __le32 *cmd_short_ssid, u8 *cmd_bssid, + u8 *scan_ssid_num, u8 *bssid_num) +{ + int j, idex_s = 0, idex_b = 0; + struct cfg80211_scan_6ghz_params *scan_6ghz_params = + params->scan_6ghz_params; + + if (!params->n_6ghz_params) { + for (j = 0; j < params->n_ssids; j++) { + cmd_short_ssid[idex_s++] = + cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid, + params->ssids[j].ssid_len)); + (*scan_ssid_num)++; + } + return 0; + } + + /* + * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz + * collocated parameters. This might not be optimal, as this processing + * does not (yet) correspond to the actual channels, so it is possible + * that some entries would be left out. + * + * TODO: improve this logic. 
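+	 *
+	 * (Reference, not part of the original patch: the short SSID
+	 * used here is, roughly, the inverted little-endian CRC32 of
+	 * the SSID octets, i.e.
+	 *
+	 *	u32 short_ssid = ~crc32_le(~0, ssid, ssid_len);
+	 *
+	 * which matches the computation used for the no-6GHz-params
+	 * case at the top of this function.)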
+	 */
+	for (j = 0; j < params->n_6ghz_params; j++) {
+		int k;
+
+		/* First, try to place the short SSID */
+		if (scan_6ghz_params[j].short_ssid_valid) {
+			for (k = 0; k < idex_s; k++) {
+				if (cmd_short_ssid[k] ==
+				    cpu_to_le32(scan_6ghz_params[j].short_ssid))
+					break;
+			}
+
+			if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
+				cmd_short_ssid[idex_s++] =
+					cpu_to_le32(scan_6ghz_params[j].short_ssid);
+				(*scan_ssid_num)++;
+			}
+		}
+
+		/* try to place BSSID for the same entry */
+		for (k = 0; k < idex_b; k++) {
+			if (!memcmp(&cmd_bssid[ETH_ALEN * k],
+				    scan_6ghz_params[j].bssid, ETH_ALEN))
+				break;
+		}
+
+		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+			memcpy(&cmd_bssid[ETH_ALEN * idex_b++],
+			       scan_6ghz_params[j].bssid, ETH_ALEN);
+			(*bssid_num)++;
+		}
+	}
+	return 0;
+}
+
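Read in isolation, iwl_mvm_umac_scan_fill_6g_chan_list() above is a bounded
dedup-and-append over two small output arrays. Below is a minimal standalone
sketch of the same pattern; the struct, constants and function name are
illustrative assumptions, not driver API (MAX_SHORT_SSIDS and MAX_BSSIDS stand
in for SCAN_SHORT_SSID_MAX_SIZE and SCAN_BSSID_MAX_SIZE), and the driver
additionally stores the short SSIDs little-endian via cpu_to_le32() because
they go into a firmware command:

	#include <stdint.h>
	#include <string.h>

	#define ETH_ALEN	6
	#define MAX_SHORT_SSIDS	4	/* illustrative bound */
	#define MAX_BSSIDS	16	/* illustrative bound */

	struct colocated_ap {
		uint32_t short_ssid;
		uint8_t bssid[ETH_ALEN];
		int short_ssid_valid;
	};

	static void collect_scan_targets(const struct colocated_ap *aps,
					 int n_aps,
					 uint32_t *short_ssids,
					 int *n_short_ssids,
					 uint8_t (*bssids)[ETH_ALEN],
					 int *n_bssids)
	{
		int i, k;

		for (i = 0; i < n_aps; i++) {
			/* append the short SSID only if it is new and fits */
			if (aps[i].short_ssid_valid) {
				for (k = 0; k < *n_short_ssids; k++)
					if (short_ssids[k] == aps[i].short_ssid)
						break;
				if (k == *n_short_ssids &&
				    *n_short_ssids < MAX_SHORT_SSIDS)
					short_ssids[(*n_short_ssids)++] =
						aps[i].short_ssid;
			}

			/* same dedup-and-append for the BSSID */
			for (k = 0; k < *n_bssids; k++)
				if (!memcmp(bssids[k], aps[i].bssid, ETH_ALEN))
					break;
			if (k == *n_bssids && *n_bssids < MAX_BSSIDS)
				memcpy(bssids[(*n_bssids)++], aps[i].bssid,
				       ETH_ALEN);
		}
	}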
+/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
+static void
+iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
+				     u32 n_channels, __le32 *cmd_short_ssid,
+				     u8 *cmd_bssid, u8 scan_ssid_num,
+				     u8 bssid_num,
+				     struct iwl_scan_channel_params_v6 *cp,
+				     enum nl80211_iftype vif_type)
+{
+	struct iwl_scan_channel_cfg_umac *channel_cfg = cp->channel_config;
+	int i;
+	struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+		params->scan_6ghz_params;
+
+	for (i = 0; i < params->n_channels; i++) {
+		struct iwl_scan_channel_cfg_umac *cfg =
+			&cp->channel_config[i];
+
+		u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
+		u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
+		bool force_passive, found = false,
+		     unsolicited_probe_on_chan = false, psc_no_listen = false;
+
+		cfg->v1.channel_num = params->channels[i]->hw_value;
+		cfg->v2.band = 2;
+		cfg->v2.iter_count = 1;
+		cfg->v2.iter_interval = 0;
+
+		/*
+		 * To optimize the scan time, i.e., reduce the scan dwell time
+		 * on each channel, the logic below tries to set 3 direct BSSID
+		 * probe requests for each broadcast probe request with a short
+		 * SSID.
+		 * TODO: improve this logic
+		 */
+		n_used_bssid_entries = 3;
+		for (j = 0; j < params->n_6ghz_params; j++) {
+			if (!(scan_6ghz_params[j].channel_idx == i))
+				continue;
+
+			found = false;
+			unsolicited_probe_on_chan |=
+				scan_6ghz_params[j].unsolicited_probe;
+			psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+
+			for (k = 0; k < scan_ssid_num; k++) {
+				if (!scan_6ghz_params[j].unsolicited_probe &&
+				    le32_to_cpu(cmd_short_ssid[k]) ==
+				    scan_6ghz_params[j].short_ssid) {
+					/* Relevant short SSID bit set */
+					if (s_ssid_bitmap & BIT(k)) {
+						found = true;
+						break;
+					}
+
+					/*
+					 * Use short SSID only to create a new
+					 * iteration during channel dwell.
+					 */
+					if (n_used_bssid_entries >= 3) {
+						s_ssid_bitmap |= BIT(k);
+						s_max++;
+						n_used_bssid_entries -= 3;
+						found = true;
+						break;
+					}
+				}
+			}
+
+			if (found)
+				continue;
+
+			for (k = 0; k < bssid_num; k++) {
+				if (!memcmp(&cmd_bssid[ETH_ALEN * k],
+					    scan_6ghz_params[j].bssid,
+					    ETH_ALEN)) {
+					if (!(bssid_bitmap & BIT(k))) {
+						bssid_bitmap |= BIT(k);
+						b_max++;
+						n_used_bssid_entries++;
+					}
+					break;
+				}
+			}
+		}
+
+		flags = bssid_bitmap | (s_ssid_bitmap << 16);
+
+		if (cfg80211_channel_is_psc(params->channels[i]) &&
+		    psc_no_listen)
+			flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+		if (unsolicited_probe_on_chan)
+			flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+		/*
+		 * Apply passive scan in the following cases:
+		 * 1. Non fragmented scan:
+		 *	- PSC channel with NO_LISTEN_FLAG on should be treated
+		 *	  like non PSC channel
+		 *	- Non PSC channel with more than 3 short SSIDs or more
+		 *	  than 9 BSSIDs.
+		 *	- Non PSC channel with unsolicited probe response and
+		 *	  more than 2 short SSIDs or more than 6 BSSIDs.
+		 *	- PSC channel with more than 2 short SSIDs or more than
+		 *	  6 BSSIDs.
+		 * 2. Fragmented scan:
+		 *	- PSC channel with more than 1 SSID or 3 BSSIDs.
+		 *	- Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+		 *	- Non PSC channel with unsolicited probe response and
+		 *	  more than 1 SSID or more than 3 BSSIDs.
+		 */
+		if (!iwl_mvm_is_scan_fragmented(params->type)) {
+			if (!cfg80211_channel_is_psc(params->channels[i]) ||
+			    flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
+				force_passive = (s_max > 3 || b_max > 9);
+				force_passive |= (unsolicited_probe_on_chan &&
+						  (s_max > 2 || b_max > 6));
+			} else {
+				force_passive = (s_max > 2 || b_max > 6);
+			}
+		} else if (cfg80211_channel_is_psc(params->channels[i])) {
+			force_passive = (s_max > 1 || b_max > 3);
+		} else {
+			force_passive = (s_max > 2 || b_max > 6);
+			force_passive |= (unsolicited_probe_on_chan &&
+					  (s_max > 1 || b_max > 3));
+		}
+		if (force_passive ||
+		    (!flags && !cfg80211_channel_is_psc(params->channels[i])))
+			flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+
+		channel_cfg[i].flags |= cpu_to_le32(flags);
+	}
+}
+
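The force-passive rules spelled out in the comment above reduce to threshold
checks on the collected short-SSID and BSSID counts (s_max and b_max in the
loop). A hedged restatement as a self-contained predicate follows; the
function and parameter names are illustrative, not driver API, and the extra
driver-side rule that a channel with no matching entries and no PSC status is
also forced passive (the "!flags" check) is left out for brevity:

	#include <stdbool.h>

	/*
	 * Sketch of the passive-scan thresholds implemented in
	 * iwl_mvm_umac_scan_cfg_channels_v6_6g() above; not the driver
	 * function itself.
	 */
	static bool scan_6ghz_force_passive(bool fragmented, bool psc,
					    bool psc_no_listen,
					    bool unsolicited_probe,
					    unsigned int s_max,
					    unsigned int b_max)
	{
		if (!fragmented) {
			/* PSC with NO_LISTEN is treated like non-PSC */
			if (!psc || psc_no_listen)
				return s_max > 3 || b_max > 9 ||
				       (unsolicited_probe &&
					(s_max > 2 || b_max > 6));
			return s_max > 2 || b_max > 6;
		}

		if (psc)
			return s_max > 1 || b_max > 3;

		return s_max > 2 || b_max > 6 ||
		       (unsolicited_probe && (s_max > 1 || b_max > 3));
	}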
 static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
 					   struct iwl_mvm_scan_params *params,
 					   struct ieee80211_vif *vif)
@@ -1746,6 +1905,10 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
 	if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
 
+	if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
+	    params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
+
 	return flags;
 }
 
@@ -1925,8 +2088,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		ret = iwl_mvm_fill_scan_sched_params(params,
 						     tail_v2->schedule,
 						     &tail_v2->delay);
-		if (ret)
+		if (ret) {
+			mvm->scan_uid_status[uid] = 0;
 			return ret;
+		}
 
 		if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
 			tail_v2->preq = params->preq;
@@ -2056,6 +2221,8 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 {
 	struct iwl_scan_req_umac_v14 *cmd = mvm->scan_cmd;
 	struct iwl_scan_req_params_v14 *scan_p = &cmd->scan_params;
+	struct iwl_scan_channel_params_v6 *cp = &scan_p->channel_params;
+	struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
 	int ret;
 	u16 gen_flags;
 	u32 bitmap_ssid = 0;
@@ -2078,8 +2245,34 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
 					  &bitmap_ssid);
-	iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
-				       &scan_p->channel_params, bitmap_ssid);
+	if (!params->scan_6ghz) {
+		iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
+					       &scan_p->channel_params,
+					       bitmap_ssid);
+
+		return 0;
+	}
+	cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
+	cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+	cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+	ret = iwl_mvm_umac_scan_fill_6g_chan_list(params, pb->short_ssid,
+						  pb->bssid_array[0],
+						  &pb->short_ssid_num,
+						  &pb->bssid_num);
+	if (ret)
+		return ret;
+
+	iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
+					     params->n_channels,
+					     pb->short_ssid,
+					     pb->bssid_array[0],
+					     pb->short_ssid_num,
+					     pb->bssid_num, cp,
+					     vif->type);
+	cp->count = params->n_channels;
+	if (!params->n_ssids ||
+	    (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+		cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
 
 	return 0;
 }
@@ -2148,7 +2341,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 		/* Something is wrong if no scan was running but we
 		 * ran out of scans.
 		 */
-		/* fall through */
+		fallthrough;
 	default:
 		WARN_ON(1);
 		break;
@@ -2209,7 +2402,7 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
 				  struct iwl_mvm_scan_params *params,
 				  int type)
 {
-	int uid, i;
+	int uid, i, err;
 	u8 scan_ver;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -2241,7 +2434,11 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
 		return ver_handler->handler(mvm, vif, params, type, uid);
 	}
 
-	return iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+	err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
+	if (err)
+		return err;
+
+	return uid;
 }
 
 int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -2254,7 +2451,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
 	};
 	struct iwl_mvm_scan_params params = {};
-	int ret;
+	int ret, uid;
 	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
 
 	lockdep_assert_held(&mvm->mutex);
@@ -2291,6 +2488,9 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	params.scan_plans = &scan_plan;
 	params.n_scan_plans = 1;
+	params.n_6ghz_params = req->n_6ghz_params;
+	params.scan_6ghz_params = req->scan_6ghz_params;
+	params.scan_6ghz = req->scan_6ghz;
 
 	iwl_mvm_fill_scan_type(mvm, &params, vif);
 
 	if (req->duration)
@@ -2298,11 +2498,11 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
-	ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
+	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
 				     IWL_MVM_SCAN_REGULAR);
 
-	if (ret)
-		return ret;
+	if (uid < 0)
+		return uid;
 
 	iwl_mvm_pause_tcm(mvm, false);
 
@@ -2314,6 +2514,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		 */
 		IWL_ERR(mvm, "Scan failed! 
ret %d\n", ret); iwl_mvm_resume_tcm(mvm); + mvm->scan_uid_status[uid] = 0; return ret; } @@ -2339,7 +2540,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, .dataflags = { IWL_HCMD_DFL_NOCOPY, }, }; struct iwl_mvm_scan_params params = {}; - int ret; + int ret, uid; + int i, j; + bool non_psc_included = false; lockdep_assert_held(&mvm->mutex); @@ -2356,8 +2559,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, if (WARN_ON(!mvm->scan_cmd)) return -ENOMEM; - if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels)) - return -ENOBUFS; params.n_ssids = req->n_ssids; params.flags = req->flags; @@ -2397,10 +2598,46 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, iwl_mvm_build_scan_probe(mvm, vif, ies, ¶ms); - ret = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); + /* for 6 GHZ band only PSC channels need to be added */ + for (i = 0; i < params.n_channels; i++) { + struct ieee80211_channel *channel = params.channels[i]; - if (ret) - return ret; + if (channel->band == NL80211_BAND_6GHZ && + !cfg80211_channel_is_psc(channel)) { + non_psc_included = true; + break; + } + } + + if (non_psc_included) { + params.channels = kmemdup(params.channels, + sizeof(params.channels[0]) * + params.n_channels, + GFP_KERNEL); + if (!params.channels) + return -ENOMEM; + + for (i = j = 0; i < params.n_channels; i++) { + if (params.channels[i]->band == NL80211_BAND_6GHZ && + !cfg80211_channel_is_psc(params.channels[i])) + continue; + params.channels[j++] = params.channels[i]; + } + params.n_channels = j; + } + + if (non_psc_included && + !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) { + kfree(params.channels); + return -ENOBUFS; + } + + uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, ¶ms, type); + + if (non_psc_included) + kfree(params.channels); + if (uid < 0) + return uid; ret = iwl_mvm_send_cmd(mvm, &hcmd); if (!ret) { @@ -2413,6 +2650,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, * should try to send the command again with different params. */ IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret); + mvm->scan_uid_status[uid] = 0; + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 368b9d117f73..655da8856c75 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -1,64 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2018-2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2018-2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2013-2014, 2018-2019 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + */ #include "mvm.h" /* For counting bound interfaces */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 799d8219463c..dc174410bf9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2015, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2015, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include "mvm.h" @@ -144,13 +89,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); - /* fall through */ + fallthrough; case IEEE80211_STA_RX_BW_80: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); - /* fall through */ + fallthrough; case IEEE80211_STA_RX_BW_40: add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); - /* fall through */ + fallthrough; case IEEE80211_STA_RX_BW_20: if (sta->ht_cap.ht_supported) add_sta_cmd.station_flags |= @@ -196,7 +141,16 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mpdu_dens = sta->ht_cap.ampdu_density; } + if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) { + add_sta_cmd.station_flags_msk |= + cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | + STA_FLG_AGG_MPDU_DENS_MSK); + mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + agg_size = le16_get_bits(sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (sta->vht_cap.vht_supported) { agg_size = sta->vht_cap.cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; @@ -3286,14 +3240,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, break; case WLAN_CIPHER_SUITE_WEP104: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_WEP40: key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(u.cmd.common.key + 3, key->key, key->keylen); break; case WLAN_CIPHER_SUITE_GCMP_256: key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_GCMP: key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); memcpy(u.cmd.common.key, key->key, key->keylen); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d7578c981a65..35a18b96aac5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2016 Intel Deutschland GmbH + */ #ifndef __sta_h__ #define __sta_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 2ad959b4ce0a..bf04326e35ff 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(C) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2014 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include <linux/etherdevice.h> #include "mvm.h" #include "time-event.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 1db6d8d38822..4e1bdf13e5e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2017 Intel Deutschland GmbH + */ #include <linux/jiffies.h> #include <net/mac80211.h> diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index b6bac776f236..989a5319fb21 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -1,65 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + */ #ifndef __time_event_h__ #define __time_event_h__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 340c892b30ff..507625f96dd7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -1,67 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * Copyright(c) 2019 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2019-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2016 Intel Deutschland GmbH + */ #include <linux/sort.h> #include "mvm.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index fe1c538cd718..a983c215df31 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <linux/tcp.h> @@ -105,11 +48,8 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, u16 mh_len = ieee80211_hdrlen(hdr->frame_control); u8 protocol = 0; - /* - * Do not compute checksum if already computed or if transport will - * compute it - */ - if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD) + /* Do not compute checksum if already computed */ + if (skb->ip_summed != CHECKSUM_PARTIAL) goto out; /* We do not expect to be requested to csum stuff we do not support */ @@ -459,7 +399,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_WEP104: tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_WEP40: tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) & @@ -470,7 +410,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: type = TX_CMD_SEC_GCMP; - /* Fall through */ + fallthrough; case WLAN_CIPHER_SUITE_CCMP_256: /* TODO: Taking the key from the table might introduce a race * when PTK rekeying is done, having an old packets with a PN diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 3123036978a5..ee2e0cb47584 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015-2017 Intel Deutschland GmbH + */ #include <net/mac80211.h> #include "iwl-debug.h" @@ -422,7 +367,7 @@ struct iwl_umac_error_event_table { static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; - struct iwl_umac_error_event_table table; + struct iwl_umac_error_event_table table = {}; u32 base = mvm->trans->dbg.umac_error_event_table; if (!base && @@ -461,7 +406,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) { struct iwl_trans *trans = mvm->trans; - struct iwl_error_event_table table; + struct iwl_error_event_table table = {}; u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num]; if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 5512e3c630c3..36bf414a388a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,54 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. 
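
The table = {}; change in iwl_mvm_dump_umac_error_log() and iwl_mvm_dump_lmac_error_log() above zero-initializes the on-stack table before the device read, so that no path can end up dumping uninitialized stack contents. The same defensive pattern in a standalone sketch; read_from_device() is a made-up stand-in for the driver's memory read.

#include <stdint.h>
#include <stdio.h>

struct error_event_table {
	uint32_t valid;          /* nonzero once the device filled the table */
	uint32_t error_id;
	uint32_t trm_hw_status0;
};

/* Hypothetical device read; fails (returns -1) when the base is bogus,
 * in which case nothing is copied into *t. */
static int read_from_device(uint32_t base, struct error_event_table *t)
{
	if (!base)
		return -1;
	t->valid = 1;
	t->error_id = 0x42;
	t->trm_hw_status0 = 0xdead;
	return 0;
}

static void dump_error_log(uint32_t base)
{
	/* "= {}" in the kernel, "= {0}" in portable C: every field starts
	 * at zero, so a failed read dumps zeros instead of stack garbage. */
	struct error_event_table table = {0};

	if (read_from_device(base, &table))
		printf("device read failed\n");

	printf("valid %u, error_id 0x%x, hw_status 0x%x\n",
	       table.valid, table.error_id, table.trm_hw_status0);
}

int main(void)
{
	dump_error_log(0);        /* failure path: prints zeros */
	dump_error_log(0x800000); /* success path */
	return 0;
}
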
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2018-2020 Intel Corporation + */ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info-gen3.h" @@ -148,7 +101,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, case IWL_AMSDU_12K: control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */ - control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K; + control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; break; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 13fe9c00d7e8..d1bb273d6b6d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -1,56 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include "iwl-trans.h" #include "iwl-fh.h" #include "iwl-context-info.h" @@ -243,7 +195,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, rb_size = IWL_CTXT_INFO_RB_SIZE_8K; break; case IWL_AMSDU_12K: - rb_size = IWL_CTXT_INFO_RB_SIZE_12K; + rb_size = IWL_CTXT_INFO_RB_SIZE_16K; break; default: WARN_ON(1); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 7b5ece380fbf..965982612e74 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,65 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright(c) 2007 - 2014, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2005 - 2014, 2018 - 2020 Intel Corporation - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> @@ -992,6 +936,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_ma_a0_mr_a0, iwl_ma_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_snj_a0_mr_a0, iwl_ma_name), + #endif /* CONFIG_IWLMVM */ }; @@ -1006,7 +956,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; struct iwl_trans *iwl_trans; struct iwl_trans_pcie *trans_pcie; - unsigned long flags; int i, ret; /* * This is needed for backwards compatibility with the old @@ -1136,21 +1085,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) trans_pcie->num_rx_bufs = RX_QUEUE_SIZE; } - if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000 && - iwl_trans_grab_nic_access(iwl_trans, &flags)) { - u32 hw_step; - - hw_step = iwl_read_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG); - hw_step |= ENABLE_WFPM; - iwl_write_umac_prph_no_grab(iwl_trans, WFPM_CTRL_REG, hw_step); - hw_step = iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP); - hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; - if (hw_step == 0x3) - iwl_trans->hw_rev = (iwl_trans->hw_rev & 0xFFFFFFF3) | - (SILICON_C_STEP << 2); - iwl_trans_release_nic_access(iwl_trans, &flags); - } - pci_set_drvdata(pdev, iwl_trans); iwl_trans->drv = iwl_drv_start(iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index ff542d2f0054..a528d3d99c5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 
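
The new _IWL_DEV_INFO() row added to iwl_dev_info_table above extends a match table in which any field may be IWL_CFG_ANY, a wildcard, and probe selects the table entry whose non-wildcard fields all match the discovered hardware. A reduced sketch of one way such a wildcard lookup can work (here, first match wins, so specific rows come first; the field widths, values, and names are invented for illustration):

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define CFG_ANY 0xffff /* wildcard: matches any discovered value */

struct dev_info {
	uint16_t mac_type;
	uint16_t rf_type;
	const char *name;
};

static const struct dev_info dev_info_table[] = {
	{ 0x0042, 0x0007, "specific MAC+RF combo" },
	{ 0x0042, CFG_ANY, "any RF on this MAC" },
	{ CFG_ANY, CFG_ANY, "catch-all" }, /* guarantees a match below */
};

static int field_matches(uint16_t want, uint16_t got)
{
	return want == CFG_ANY || want == got;
}

static const char *lookup(uint16_t mac_type, uint16_t rf_type)
{
	for (size_t i = 0; i < sizeof(dev_info_table) / sizeof(dev_info_table[0]); i++) {
		const struct dev_info *d = &dev_info_table[i];

		if (field_matches(d->mac_type, mac_type) &&
		    field_matches(d->rf_type, rf_type))
			return d->name;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", lookup(0x0042, 0x0007)); /* specific row */
	printf("%s\n", lookup(0x0042, 0x0009)); /* MAC wildcard row */
	printf("%s\n", lookup(0x0001, 0x0001)); /* catch-all */
	return 0;
}
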
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2003-2015, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #ifndef __iwl_trans_int_pcie_h__ #define __iwl_trans_int_pcie_h__ @@ -348,8 +291,6 @@ struct cont_rec { * @def_rx_queue - default rx queue number * @rx_buf_size: Rx buffer size * @scd_set_active: should the transport configure the SCD for HCMD queue - * @sw_csum_tx: if true, then the transport will compute the csum of the TXed - * frame. 
* @rx_page_order: page order for receive buffer size * @rx_buf_bytes: RX buffer (RB) size in bytes * @reg_lock: protect hw register access @@ -432,7 +373,6 @@ struct iwl_trans_pcie { enum iwl_amsdu_size rx_buf_size; bool scd_set_active; - bool sw_csum_tx; bool pcie_dbg_dumped_once; u32 rx_page_order; u32 rx_buf_bytes; @@ -466,6 +406,10 @@ struct iwl_trans_pcie { void *base_rb_stts; dma_addr_t base_rb_stts_dma; + + bool fw_reset_handshake; + bool fw_reset_done; + wait_queue_head_t fw_reset_waitq; }; static inline struct iwl_trans_pcie * @@ -551,9 +495,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); -void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs); -void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); /***************************************************** @@ -839,7 +780,6 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); -void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 94299f259518..37bbd9a07f36 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019 Intel Corporation - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/sched.h> #include <linux/wait.h> #include <linux/gfp.h> @@ -1296,9 +1239,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { struct iwl_rx_packet *pkt; - u16 sequence; bool reclaim; - int index, cmd_index, len; + int len; struct iwl_rx_cmd_buffer rxcb = { ._offset = rxb->offset + offset, ._rx_page_order = trans_pcie->rx_page_order, @@ -1335,6 +1277,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, len = iwl_rx_packet_len(pkt); len += sizeof(u32); /* account for status word */ + + offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); + + /* check that what the device tells us made sense */ + if (offset > max_len) + break; + trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); @@ -1357,10 +1306,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } } - sequence = le16_to_cpu(pkt->hdr.sequence); - index = SEQ_TO_INDEX(sequence); - cmd_index = iwl_txq_get_cmd_index(txq, index); - if (rxq->id == trans_pcie->def_rx_queue) iwl_op_mode_rx(trans->op_mode, &rxq->napi, &rxcb); @@ -1368,17 +1313,19 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, &rxcb, rxq->id); - if (reclaim) { - kfree_sensitive(txq->entries[cmd_index].free_buf); - txq->entries[cmd_index].free_buf = NULL; - } - /* * After here, we should always check rxcb._page_stolen, * if it is true then one of the handlers took the page. 
*/ if (reclaim) { + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index = iwl_txq_get_cmd_index(txq, index); + + kfree_sensitive(txq->entries[cmd_index].free_buf); + txq->entries[cmd_index].free_buf = NULL; + /* Invoke any callbacks, transfer the buffer to caller, * and fire off the (possibly) blocking * iwl_trans_send_cmd() @@ -1392,7 +1339,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, page_stolen |= rxcb._page_stolen; if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) break; - offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } /* page was stolen from us -- free our reference */ @@ -2277,17 +2223,6 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { - /* Reflect IML transfer status */ - int res = iwl_read32(trans, CSR_IML_RESP_ADDR); - - IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); - if (res == IWL_IMAGE_RESP_FAIL) { - isr_stats->sw++; - iwl_pcie_irq_handle_error(trans); - } - } - /* Chip got too hot and stopped itself */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); @@ -2307,6 +2242,12 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) iwl_pcie_irq_handle_error(trans); } + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) { + IWL_DEBUG_ISR(trans, "Reset flow completed\n"); + trans_pcie->fw_reset_done = true; + wake_up(&trans_pcie->fw_reset_waitq); + } + iwl_pcie_clear_irq(trans, entry); lock_map_release(&trans->sync_cmd_lockdep_map); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 91ec9379c061..c602b815dcc2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,55 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
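
Two things change in the iwl_pcie_rx_handle_rb() hunks above: the frame offset now advances (with FH_RSCSR_FRAME_ALIGN alignment) before the packet is processed, and a new sanity check breaks out of the loop if the device-supplied length would run past the buffer. A standalone sketch of that walk over length-prefixed frames; the record layout, FRAME_ALIGN value, and names here are invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAME_ALIGN 0x40 /* stand-in for FH_RSCSR_FRAME_ALIGN */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Each frame in the receive buffer starts with its own length word. */
struct frame_hdr {
	uint32_t len; /* payload length, device-supplied: validate it! */
};

static void handle_rb(const uint8_t *buf, size_t max_len)
{
	size_t offset = 0;

	while (offset + sizeof(struct frame_hdr) < max_len) {
		struct frame_hdr hdr;

		memcpy(&hdr, buf + offset, sizeof(hdr));
		if (!hdr.len) /* zero length marks end of data */
			break;

		size_t payload = offset + sizeof(hdr);

		/* Advance first, then check that what the device told us
		 * made sense before touching the payload. */
		offset += ALIGN(sizeof(hdr) + hdr.len, FRAME_ALIGN);
		if (offset > max_len)
			break;

		printf("frame at %zu, %u payload bytes\n", payload, hdr.len);
	}
}

int main(void)
{
	uint8_t buf[256] = {0};
	uint32_t len = 10;

	memcpy(buf, &len, sizeof(len));        /* one honest 10-byte frame */
	len = 300;                             /* second frame lies about its size */
	memcpy(buf + 0x40, &len, sizeof(len));
	handle_rb(buf, sizeof(buf));           /* prints the first frame only */
	return 0;
}
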
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" @@ -135,6 +88,28 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } +static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + trans_pcie->fw_reset_done = false; + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, + UREG_NIC_SET_NMI_DRIVER_RESET_HANDSHAKE); + else + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE); + + /* wait 200ms */ + ret = wait_event_timeout(trans_pcie->fw_reset_waitq, + trans_pcie->fw_reset_done, HZ / 5); + if (!ret) + IWL_ERR(trans, + "firmware didn't ACK the reset - continue anyway\n"); +} + void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -144,6 +119,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans) if (trans_pcie->is_down) return; + if (trans_pcie->fw_reset_handshake && + trans->state >= IWL_TRANS_FW_STARTED) + iwl_trans_pcie_fw_reset_handshake(trans); + trans_pcie->is_down = true; /* tell the device to stop sending interrupts */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2fffbbc8462f..285e0d586021 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1,64 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
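
iwl_trans_pcie_fw_reset_handshake() above is the classic doorbell-and-wait shape: clear a done flag, ring a device register, sleep on a waitqueue until the ISR (the MSIX_HW_INT_CAUSES_REG_RESET_DONE branch added earlier) sets the flag and wakes the waiter, and give up after 200 ms (HZ / 5) with only a warning. A user-space analogue of that pattern, with a condition variable standing in for the waitqueue and a thread standing in for the interrupt handler (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static bool fw_reset_done;

/* Stands in for the ISR seeing RESET_DONE and calling wake_up(). */
static void *isr_thread(void *arg)
{
	(void)arg;
	usleep(50 * 1000); /* firmware "acks" after 50 ms */
	pthread_mutex_lock(&lock);
	fw_reset_done = true;
	pthread_cond_signal(&waitq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void fw_reset_handshake(void)
{
	pthread_t isr;
	struct timespec deadline;

	pthread_mutex_lock(&lock);
	fw_reset_done = false;
	pthread_mutex_unlock(&lock);

	/* "Ring the doorbell": here that just starts the fake ISR. */
	pthread_create(&isr, NULL, isr_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 200 * 1000000L; /* wait 200 ms, like HZ / 5 */
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!fw_reset_done) {
		if (pthread_cond_timedwait(&waitq, &lock, &deadline)) {
			/* timed out: warn and carry on, like the driver */
			fprintf(stderr, "firmware didn't ACK the reset - continue anyway\n");
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	pthread_join(&isr, NULL);
}

int main(void)
{
	fw_reset_handshake();
	return 0;
}
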
See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2007-2015, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/debugfs.h> @@ -1103,7 +1048,7 @@ static struct iwl_causes_list causes_list[] = { {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, - {MSIX_HW_INT_CAUSES_REG_IML, CSR_MSIX_HW_INT_MASK_AD, 0x12}, + {MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12}, {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, @@ -1934,7 +1879,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; - trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; @@ -1946,6 +1890,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, */ if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); + + trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; } void iwl_trans_pcie_free(struct iwl_trans *trans) @@ -2214,59 +2160,6 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, ofs, val); } -static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, - unsigned long txqs, - bool freeze) -{ - int queue; - - for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = trans->txqs.txq[queue]; - unsigned long now; - - spin_lock_bh(&txq->lock); - - now = jiffies; - - if (txq->frozen == freeze) - goto next_queue; - - IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", - freeze ? "Freezing" : "Waking", queue); - - txq->frozen = freeze; - - if (txq->read_ptr == txq->write_ptr) - goto next_queue; - - if (freeze) { - if (unlikely(time_after(now, - txq->stuck_timer.expires))) { - /* - * The timer should have fired, maybe it is - * spinning right now on the lock. 
- */ - goto next_queue; - } - /* remember how long until the timer fires */ - txq->frozen_expiry_remainder = - txq->stuck_timer.expires - now; - del_timer(&txq->stuck_timer); - goto next_queue; - } - - /* - * Wake a non-empty queue -> arm timer with the - * remainder before it froze - */ - mod_timer(&txq->stuck_timer, - now + txq->frozen_expiry_remainder); - -next_queue: - spin_unlock_bh(&txq->lock); - } -} - static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { int i; @@ -3133,6 +3026,8 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, fw_mon_data->fw_mon_base_high_ptr = cpu_to_le32(iwl_read_prph(trans, base_high)); write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; + /* convert wrtPtr to DWs, to align with all HWs */ + write_ptr_val >>= 2; } fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); } @@ -3440,7 +3335,7 @@ static const struct iwl_trans_ops trans_ops_pcie = { .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, - .reclaim = iwl_trans_pcie_reclaim, + .reclaim = iwl_txq_reclaim, .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, @@ -3449,7 +3344,7 @@ static const struct iwl_trans_ops trans_ops_pcie = { .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, - .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, + .freeze_txq_timer = iwl_trans_txq_freeze_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, #ifdef CONFIG_IWLWIFI_DEBUGFS .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, @@ -3467,9 +3362,9 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .send_cmd = iwl_trans_pcie_gen2_send_hcmd, .tx = iwl_txq_gen2_tx, - .reclaim = iwl_trans_pcie_reclaim, + .reclaim = iwl_txq_reclaim, - .set_q_ptrs = iwl_trans_pcie_set_q_ptrs, + .set_q_ptrs = iwl_txq_set_q_ptrs, .txq_alloc = iwl_txq_dyn_alloc, .txq_free = iwl_txq_dyn_free, @@ -3511,6 +3406,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->alloc_page_lock); mutex_init(&trans_pcie->mutex); init_waitqueue_head(&trans_pcie->ucode_write_waitq); + init_waitqueue_head(&trans_pcie->fw_reset_waitq); trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); @@ -3587,26 +3483,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * "dash" value). To keep hw_rev backwards compatible - we'll store it * in the old format. */ - if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) { + if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); - ret = iwl_pcie_prepare_card_hw(trans); - if (ret) { - IWL_WARN(trans, "Exit HW not ready\n"); - goto out_no_pci; - } - - /* - * in-order to recognize C step driver should read chip version - * id located at the AUX bus MISC address space. 
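
The fw_reset_waitq initialized above pairs with the new fw_reset_handshake transport-config flag and the MSIX_HW_INT_CAUSES_REG_RESET_DONE cause from the earlier hunk. The consumer side is not part of this excerpt; below is a minimal sketch of how such a handshake is typically waited on, assuming a hypothetical fw_reset_done flag set from the RESET_DONE interrupt handler (neither example name is taken from this patch):

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical sketch, not this driver's actual code. */
struct example_trans {
	wait_queue_head_t fw_reset_waitq;
	bool fw_reset_done;		/* set by the RESET_DONE IRQ handler */
};

static int example_wait_fw_reset(struct example_trans *t)
{
	/* Sleep until the IRQ handler reports the handshake, or give up
	 * after one second; wait_event_timeout() returns 0 on timeout. */
	long ret = wait_event_timeout(t->fw_reset_waitq,
				      t->fw_reset_done, HZ);

	return ret ? 0 : -ETIMEDOUT;
}
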
- */ - ret = iwl_finish_nic_init(trans, cfg_trans); - if (ret) - goto out_no_pci; - - } - IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); @@ -3637,7 +3517,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } - trans_pcie->inta_mask = CSR_INI_SET_MASK; } #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index baa83a0b8593..8757246a90d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1,55 +1,8 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2017 Intel Deutschland GmbH + * Copyright (C) 2018-2020 Intel Corporation + */ #include <net/tso.h> #include <linux/tcp.h> diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 966be5689d63..5dda0015522d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,66 +1,9 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 - 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
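
Throughout this series, multi-page dual-license boilerplate collapses to a single SPDX identifier. Per the kernel's license-rules documentation, the identifier goes on the first line: C sources use the C99 comment style, while headers keep the block-comment style (visible in the queue/tx.h hunk further below):

/* First line of a .c file: */
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* First line of a .h file: */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
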
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2003-2014, 2018-2020 Intel Corporation + * Copyright (C) 2013-2015 Intel Mobile Communications GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH + */ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/slab.h> @@ -205,47 +148,6 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd, tfd_fh->num_tbs = idx + 1; } -/* - * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @trans - transport private data - * @txq - tx queue - * @dma_dir - the direction of the DMA mapping - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int rd_ptr = txq->read_ptr; - int idx = iwl_txq_get_cmd_index(txq, rd_ptr); - - lockdep_assert_held(&txq->lock); - - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ - iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); - - /* free SKB */ - if (txq->entries) { - struct sk_buff *skb; - - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } - } -} - static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { @@ -312,7 +214,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_txq_free_tso_page(trans, skb); } - iwl_pcie_txq_free_tfd(trans, txq); + iwl_txq_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { @@ -721,160 +623,6 @@ error: return ret; } -static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - if (!txq->wd_timeout) - return; - - /* - * station is asleep and we send data - that must - * be uAPSD or PS-Poll. Don't rearm the timer. - */ - if (txq->frozen) - return; - - /* - * if empty delete timer, otherwise move timer forward - * since we're making progress on this queue - */ - if (txq->read_ptr == txq->write_ptr) - del_timer(&txq->stuck_timer); - else - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); -} - -/* Frees buffers until index _not_ inclusive */ -void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num = iwl_txq_get_cmd_index(txq, ssn); - int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); - int last_to_free; - - /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) - return; - - spin_lock_bh(&txq->lock); - - if (!test_bit(txq_id, trans->txqs.queue_used)) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", - txq_id, ssn); - goto out; - } - - if (read_ptr == tfd_num) - goto out; - - IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", - txq_id, txq->read_ptr, tfd_num, ssn); - - /*Since we free until index _not_ inclusive, the one before index is - * the last we will free. 
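
The reclaim routine removed here (and re-added later in this diff as iwl_txq_reclaim in queue/tx.c) frees entries up to, but not including, ssn. The iwl_txq_inc_wrap()/iwl_txq_dec_wrap() helpers it relies on are not shown in any hunk; a simplified sketch of that circular-index arithmetic, assuming a power-of-two queue size (hypothetical names):

/* Hypothetical sketch of the wrap helpers behind the reclaim loop. */
static inline int example_inc_wrap(int idx, int queue_size)
{
	return (idx + 1) & (queue_size - 1);	/* queue_size: power of two */
}

static inline int example_dec_wrap(int idx, int queue_size)
{
	return (idx - 1) & (queue_size - 1);
}

/*
 * "Free until ssn, not inclusive" thus means the last entry freed is
 * example_dec_wrap(ssn, queue_size); the read pointer stops on ssn.
 */
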
This one must be used */ - last_to_free = iwl_txq_dec_wrap(trans, tfd_num); - - if (!iwl_txq_used(txq, last_to_free)) { - IWL_ERR(trans, - "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, - trans->trans_cfg->base_params->max_tfd_queue_size, - txq->write_ptr, txq->read_ptr); - goto out; - } - - if (WARN_ON(!skb_queue_empty(skbs))) - goto out; - - for (; - read_ptr != tfd_num; - txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { - struct sk_buff *skb = txq->entries[read_ptr].skb; - - if (WARN_ON_ONCE(!skb)) - continue; - - iwl_txq_free_tso_page(trans, skb); - - __skb_queue_tail(skbs, skb); - - txq->entries[read_ptr].skb = NULL; - - if (!trans->trans_cfg->use_tfh) - iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); - - iwl_pcie_txq_free_tfd(trans, txq); - } - - iwl_pcie_txq_progress(txq); - - if (iwl_txq_space(trans, txq) > txq->low_mark && - test_bit(txq_id, trans->txqs.queue_stopped)) { - struct sk_buff_head overflow_skbs; - - __skb_queue_head_init(&overflow_skbs); - skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); - - /* - * We are going to transmit from the overflow queue. - * Remember this state so that wait_for_txq_empty will know we - * are adding more packets to the TFD queue. It cannot rely on - * the state of &txq->overflow_q, as we just emptied it, but - * haven't TXed the content yet. - */ - txq->overflow_tx = true; - - /* - * This is tricky: we are in reclaim path which is non - * re-entrant, so noone will try to take the access the - * txq data from that path. We stopped tx, so we can't - * have tx as well. Bottom line, we can unlock and re-lock - * later. - */ - spin_unlock_bh(&txq->lock); - - while (!skb_queue_empty(&overflow_skbs)) { - struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_tx_cmd *dev_cmd_ptr; - - dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); - - /* - * Note that we can very well be overflowing again. - * In that case, iwl_txq_space will be small again - * and we won't wake mac80211's queue. 
- */ - iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); - } - - if (iwl_txq_space(trans, txq) > txq->low_mark) - iwl_wake_queue(trans, txq); - - spin_lock_bh(&txq->lock); - txq->overflow_tx = false; - } - -out: - spin_unlock_bh(&txq->lock); -} - -/* Set wr_ptr of specific device and txq */ -void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - - txq->write_ptr = ptr; - txq->read_ptr = txq->write_ptr; - - spin_unlock_bh(&txq->lock); -} - static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { @@ -962,7 +710,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } - iwl_pcie_txq_progress(txq); + iwl_txq_progress(txq); } static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, @@ -1692,25 +1440,6 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, - bool ipv6, unsigned int len) -{ - if (ipv6) { - struct ipv6hdr *iphv6 = iph; - - tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr, - len + tcph->doff * 4, - IPPROTO_TCP, 0); - } else { - struct iphdr *iphv4 = iph; - - ip_send_check(iphv4); - tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr, - len + tcph->doff * 4, - IPPROTO_TCP, 0); - } -} - static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, @@ -1718,8 +1447,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(txq->trans); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; @@ -1778,8 +1505,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct sk_buff *csum_skb = NULL; unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; - struct tcphdr *tcph; - u8 *iph, *subf_hdrs_start = hdr_page->pos; + u8 *subf_hdrs_start = hdr_page->pos; total_len -= data_left; @@ -1801,27 +1527,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, * as MAC header. 
*/ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); - iph = hdr_page->pos + 8; - tcph = (void *)(iph + ip_hdrlen); - - /* For testing on current hardware only */ - if (trans_pcie->sw_csum_tx) { - csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), - GFP_ATOMIC); - if (!csum_skb) - return -ENOMEM; - - iwl_compute_pseudo_hdr_csum(iph, tcph, - skb->protocol == - htons(ETH_P_IPV6), - data_left); - - skb_put_data(csum_skb, tcph, tcp_hdrlen(skb)); - skb_reset_transport_header(csum_skb); - csum_skb->csum_start = - (unsigned char *)tcp_hdr(csum_skb) - - csum_skb->head; - } hdr_page->pos += snap_ip_tcp_hdrlen; @@ -1848,9 +1553,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, data_left); dma_addr_t tb_phys; - if (trans_pcie->sw_csum_tx) - skb_put_data(csum_skb, tso.data, size); - tb_phys = dma_map_single(trans->dev, tso.data, size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { @@ -1866,23 +1568,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, data_left -= size; tso_build_data(skb, &tso, size); } - - /* For testing on early hardware only */ - if (trans_pcie->sw_csum_tx) { - __wsum csum; - - csum = skb_checksum(csum_skb, - skb_checksum_start_offset(csum_skb), - csum_skb->len - - skb_checksum_start_offset(csum_skb), - 0); - dev_kfree_skb(csum_skb); - dma_sync_single_for_cpu(trans->dev, hdr_tb_phys, - hdr_tb_len, DMA_TO_DEVICE); - tcph->check = csum_fold(csum); - dma_sync_single_for_device(trans->dev, hdr_tb_phys, - hdr_tb_len, DMA_TO_DEVICE); - } } /* re -add the WiFi header and IV */ @@ -1907,7 +1592,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; @@ -1928,21 +1612,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, "TX on unused queue %d\n", txq_id)) return -EINVAL; - if (unlikely(trans_pcie->sw_csum_tx && - skb->ip_summed == CHECKSUM_PARTIAL)) { - int offs = skb_checksum_start_offset(skb); - int csum_offs = offs + skb->csum_offset; - __wsum csum; - - if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16))) - return -1; - - csum = skb_checksum(skb, offs, skb->len - offs, 0); - *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); - - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - if (skb_is_nonlinear(skb) && skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) @@ -2017,7 +1686,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, amsdu = ieee80211_is_data_qos(fc) && (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); - if (trans_pcie->sw_csum_tx || !amsdu) { + if (!amsdu) { tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index af0b27a68d84..27eea909e32d 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1,53 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
- * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * BSD LICENSE - * - * Copyright(c) 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2020 Intel Corporation + */ #include <net/tso.h> #include <linux/tcp.h> @@ -1311,11 +1265,7 @@ void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) return; } - iwl_txq_gen2_unmap(trans, queue); - - iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]); - - trans->txqs.txq[queue] = NULL; + iwl_txq_gen2_free(trans, queue); IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); } @@ -1527,3 +1477,251 @@ void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } + +/* + * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @trans - transport private data + * @txq - tx queue + * @dma_dir - the direction of the DMA mapping + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int rd_ptr = txq->read_ptr; + int idx = iwl_txq_get_cmd_index(txq, rd_ptr); + + lockdep_assert_held(&txq->lock); + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); + + /* free SKB */ + if (txq->entries) { + struct sk_buff *skb; + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } + } +} + +void iwl_txq_progress(struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + if (!txq->wd_timeout) + return; + + /* + * station is asleep and we send data - that must + * be uAPSD or PS-Poll. Don't rearm the timer. + */ + if (txq->frozen) + return; + + /* + * if empty delete timer, otherwise move timer forward + * since we're making progress on this queue + */ + if (txq->read_ptr == txq->write_ptr) + del_timer(&txq->stuck_timer); + else + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); +} + +/* Frees buffers until index _not_ inclusive */ +void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + int tfd_num = iwl_txq_get_cmd_index(txq, ssn); + int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); + int last_to_free; + + /* This function is not meant to release cmd queue*/ + if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) + return; + + spin_lock_bh(&txq->lock); + + if (!test_bit(txq_id, trans->txqs.queue_used)) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", + txq_id, ssn); + goto out; + } + + if (read_ptr == tfd_num) + goto out; + + IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", + txq_id, txq->read_ptr, tfd_num, ssn); + + /*Since we free until index _not_ inclusive, the one before index is + * the last we will free. 
This one must be used */ + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); + + if (!iwl_txq_used(txq, last_to_free)) { + IWL_ERR(trans, + "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, last_to_free, + trans->trans_cfg->base_params->max_tfd_queue_size, + txq->write_ptr, txq->read_ptr); + goto out; + } + + if (WARN_ON(!skb_queue_empty(skbs))) + goto out; + + for (; + read_ptr != tfd_num; + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { + struct sk_buff *skb = txq->entries[read_ptr].skb; + + if (WARN_ON_ONCE(!skb)) + continue; + + iwl_txq_free_tso_page(trans, skb); + + __skb_queue_tail(skbs, skb); + + txq->entries[read_ptr].skb = NULL; + + if (!trans->trans_cfg->use_tfh) + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); + + iwl_txq_free_tfd(trans, txq); + } + + iwl_txq_progress(txq); + + if (iwl_txq_space(trans, txq) > txq->low_mark && + test_bit(txq_id, trans->txqs.queue_stopped)) { + struct sk_buff_head overflow_skbs; + + __skb_queue_head_init(&overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + + /* + * We are going to transmit from the overflow queue. + * Remember this state so that wait_for_txq_empty will know we + * are adding more packets to the TFD queue. It cannot rely on + * the state of &txq->overflow_q, as we just emptied it, but + * haven't TXed the content yet. + */ + txq->overflow_tx = true; + + /* + * This is tricky: we are in reclaim path which is non + * re-entrant, so noone will try to take the access the + * txq data from that path. We stopped tx, so we can't + * have tx as well. Bottom line, we can unlock and re-lock + * later. + */ + spin_unlock_bh(&txq->lock); + + while (!skb_queue_empty(&overflow_skbs)) { + struct sk_buff *skb = __skb_dequeue(&overflow_skbs); + struct iwl_device_tx_cmd *dev_cmd_ptr; + + dev_cmd_ptr = *(void **)((u8 *)skb->cb + + trans->txqs.dev_cmd_offs); + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_txq_space will be small again + * and we won't wake mac80211's queue. + */ + iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); + } + + if (iwl_txq_space(trans, txq) > txq->low_mark) + iwl_wake_queue(trans, txq); + + spin_lock_bh(&txq->lock); + txq->overflow_tx = false; + } + +out: + spin_unlock_bh(&txq->lock); +} + +/* Set wr_ptr of specific device and txq */ +void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + + spin_lock_bh(&txq->lock); + + txq->write_ptr = ptr; + txq->read_ptr = txq->write_ptr; + + spin_unlock_bh(&txq->lock); +} + +void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, + bool freeze) +{ + int queue; + + for_each_set_bit(queue, &txqs, BITS_PER_LONG) { + struct iwl_txq *txq = trans->txqs.txq[queue]; + unsigned long now; + + spin_lock_bh(&txq->lock); + + now = jiffies; + + if (txq->frozen == freeze) + goto next_queue; + + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", + freeze ? "Freezing" : "Waking", queue); + + txq->frozen = freeze; + + if (txq->read_ptr == txq->write_ptr) + goto next_queue; + + if (freeze) { + if (unlikely(time_after(now, + txq->stuck_timer.expires))) { + /* + * The timer should have fired, maybe it is + * spinning right now on the lock. 
+ */ + goto next_queue; + } + /* remember how long until the timer fires */ + txq->frozen_expiry_remainder = + txq->stuck_timer.expires - now; + del_timer(&txq->stuck_timer); + goto next_queue; + } + + /* + * Wake a non-empty queue -> arm timer with the + * remainder before it froze + */ + mod_timer(&txq->stuck_timer, + now + txq->frozen_expiry_remainder); + +next_queue: + spin_unlock_bh(&txq->lock); + } +} + diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h index c67577dfa21d..cff694c25ccc 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h @@ -1,60 +1,7 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2020 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2020 Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2020 Intel Corporation + */ #ifndef __iwl_trans_queue_tx_h__ #define __iwl_trans_queue_tx_h__ #include "iwl-fh.h" @@ -227,4 +174,11 @@ void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs); +void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs); +void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); +void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, + bool freeze); +void iwl_txq_progress(struct iwl_txq *txq); +void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); #endif /* __iwl_trans_queue_tx_h__ */ diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 22cfb6452644..9a19046217df 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -3169,22 +3169,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, /* Initialize tasklets for handling hardware IRQ related operations * outside hw IRQ handler */ -#define HOSTAP_TASKLET_INIT(q, f, d) \ -do { memset((q), 0, sizeof(*(q))); (q)->func = (void(*)(unsigned long))(f); } \ -while (0) - HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet, - (unsigned long) local); - - HOSTAP_TASKLET_INIT(&local->info_tasklet, hostap_info_tasklet, - (unsigned long) local); + tasklet_setup(&local->bap_tasklet, hostap_bap_tasklet); + tasklet_setup(&local->info_tasklet, hostap_info_tasklet); hostap_info_init(local); - HOSTAP_TASKLET_INIT(&local->rx_tasklet, - hostap_rx_tasklet, (unsigned long) local); + tasklet_setup(&local->rx_tasklet, hostap_rx_tasklet); skb_queue_head_init(&local->rx_list); - HOSTAP_TASKLET_INIT(&local->sta_tx_exc_tasklet, - hostap_sta_tx_exc_tasklet, (unsigned long) local); + tasklet_setup(&local->sta_tx_exc_tasklet, + hostap_sta_tx_exc_tasklet); skb_queue_head_init(&local->sta_tx_exc_list); INIT_LIST_HEAD(&local->cmd_queue); diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c index 514c7b01dbf6..49766b285230 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c @@ -44,19 +44,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) if (local->iw_mode != IW_MODE_MASTER && local->iw_mode != IW_MODE_REPEAT) { - int update = 1; -#ifdef in_atomic - /* RID reading might sleep and it must not be called in - * interrupt context or while atomic. However, this - * function seems to be called while atomic (at least in Linux - * 2.5.59). Update signal quality values only if in suitable - * context. Otherwise, previous values read from tick timer - * will be used. 
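
The hostap hunk above drops the home-grown HOSTAP_TASKLET_INIT macro, which smuggled the private data through an unsigned long cast, in favor of tasklet_setup(). Under the new API the callback receives the tasklet itself and recovers its container with from_tasklet(); a minimal sketch of the converted shape, with hypothetical names:

#include <linux/interrupt.h>

struct example_local {
	struct tasklet_struct bap_tasklet;
	/* ... driver state ... */
};

/* New-style callback: takes the tasklet, not an unsigned long. */
static void example_bap_tasklet(struct tasklet_struct *t)
{
	struct example_local *local = from_tasklet(local, t, bap_tasklet);

	/* ... process events for @local ... */
}

static void example_init(struct example_local *local)
{
	tasklet_setup(&local->bap_tasklet, example_bap_tasklet);
}
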
*/ - if (in_atomic()) - update = 0; -#endif /* in_atomic */ - - if (update && prism2_update_comms_qual(dev) == 0) + + if (prism2_update_comms_qual(dev) == 0) wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c index 43790fbea0e0..6d4b7f64efcf 100644 --- a/drivers/net/wireless/intersil/orinoco/hermes.c +++ b/drivers/net/wireless/intersil/orinoco/hermes.c @@ -763,6 +763,7 @@ static const struct hermes_ops hermes_ops_local = { .init_cmd_wait = hermes_doicmd_wait, .allocate = hermes_allocate, .read_ltv = hermes_read_ltv, + .read_ltv_pr = hermes_read_ltv, .write_ltv = hermes_write_ltv, .bap_pread = hermes_bap_pread, .bap_pwrite = hermes_bap_pwrite, diff --git a/drivers/net/wireless/intersil/orinoco/hermes.h b/drivers/net/wireless/intersil/orinoco/hermes.h index 9f668185b7d2..3dc561a5cb7a 100644 --- a/drivers/net/wireless/intersil/orinoco/hermes.h +++ b/drivers/net/wireless/intersil/orinoco/hermes.h @@ -386,6 +386,8 @@ struct hermes_ops { int (*allocate)(struct hermes *hw, u16 size, u16 *fid); int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen, u16 *length, void *buf); + int (*read_ltv_pr)(struct hermes *hw, int bap, u16 rid, + unsigned buflen, u16 *length, void *buf); int (*write_ltv)(struct hermes *hw, int bap, u16 rid, u16 length, const void *value); int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len, @@ -494,6 +496,8 @@ static inline void hermes_clear_words(struct hermes *hw, int off, #define HERMES_READ_RECORD(hw, bap, rid, buf) \ (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf))) +#define HERMES_READ_RECORD_PR(hw, bap, rid, buf) \ + (hw->ops->read_ltv_pr((hw), (bap), (rid), sizeof(*buf), NULL, (buf))) #define HERMES_WRITE_RECORD(hw, bap, rid, buf) \ (hw->ops->write_ltv((hw), (bap), (rid), \ HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf))) @@ -509,6 +513,17 @@ static inline int hermes_read_wordrec(struct hermes *hw, int bap, u16 rid, return err; } +static inline int hermes_read_wordrec_pr(struct hermes *hw, int bap, u16 rid, + u16 *word) +{ + __le16 rec; + int err; + + err = HERMES_READ_RECORD_PR(hw, bap, rid, &rec); + *word = le16_to_cpu(rec); + return err; +} + static inline int hermes_write_wordrec(struct hermes *hw, int bap, u16 rid, u16 word) { diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c index 61af5a28f269..2c7adb4be100 100644 --- a/drivers/net/wireless/intersil/orinoco/hw.c +++ b/drivers/net/wireless/intersil/orinoco/hw.c @@ -78,7 +78,7 @@ int determine_fw_capabilities(struct orinoco_private *priv, char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2))); /* Get the hardware version */ - err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id); + err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_NICID, &nic_id); if (err) { dev_err(dev, "Cannot read hardware identity: error %d\n", err); @@ -101,7 +101,7 @@ int determine_fw_capabilities(struct orinoco_private *priv, priv->firmware_type = determine_firmware_type(&nic_id); /* Get the firmware version */ - err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id); + err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_STAID, &sta_id); if (err) { dev_err(dev, "Cannot read station identity: error %d\n", err); @@ -177,7 +177,7 @@ int determine_fw_capabilities(struct orinoco_private *priv, /* 3Com MAC : 00:50:DA:* */ memset(tmp, 0, sizeof(tmp)); /* Get the Symbol firmware version */ - err = 
hw->ops->read_ltv(hw, USER_BAP, + err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_SECONDARYVERSION_SYMBOL, SYMBOL_MAX_VER_LEN, NULL, &tmp); if (err) { @@ -286,7 +286,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) u16 reclen; /* Get the MAC address */ - err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, + err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, ETH_ALEN, NULL, dev_addr); if (err) { dev_warn(dev, "Failed to read MAC address!\n"); @@ -296,7 +296,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) dev_dbg(dev, "MAC address %pM\n", dev_addr); /* Get the station name */ - err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, + err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNNAME, sizeof(nickbuf), &reclen, &nickbuf); if (err) { dev_err(dev, "failed to read station name\n"); @@ -312,7 +312,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) dev_dbg(dev, "Station name \"%s\"\n", priv->nick); /* Get allowed channels */ - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CHANNELLIST, &priv->channel_mask); if (err) { dev_err(dev, "Failed to read channel list!\n"); @@ -320,13 +320,13 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) } /* Get initial AP density */ - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &priv->ap_density); if (err || priv->ap_density < 1 || priv->ap_density > 3) priv->has_sensitivity = 0; /* Get initial RTS threshold */ - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, &priv->rts_thresh); if (err) { dev_err(dev, "Failed to read RTS threshold!\n"); @@ -335,11 +335,11 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) /* Get initial fragmentation settings */ if (priv->has_mwo) - err = hermes_read_wordrec(hw, USER_BAP, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFMWOROBUST_AGERE, &priv->mwo_robust); else - err = hermes_read_wordrec(hw, USER_BAP, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD, &priv->frag_thresh); if (err) { @@ -351,7 +351,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) if (priv->has_pm) { priv->pm_on = 0; priv->pm_mcast = 1; - err = hermes_read_wordrec(hw, USER_BAP, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, &priv->pm_period); if (err) { @@ -359,7 +359,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) "period!\n"); goto out; } - err = hermes_read_wordrec(hw, USER_BAP, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &priv->pm_timeout); if (err) { @@ -371,7 +371,7 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) /* Preamble setup */ if (priv->has_preamble) { - err = hermes_read_wordrec(hw, USER_BAP, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL, &priv->preamble); if (err) { @@ -381,21 +381,21 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) } /* Retry settings */ - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT, &priv->short_retry_limit); 
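
These conversions thread the new read_ltv_pr operation through callers known to run in preemptible (process) context. For the memory-mapped back end, both slots point at the same routine, since plain register reads never sleep; only the USB back end supplies distinct polling and sleeping variants. A condensed, hypothetical view of that pattern:

#include <linux/types.h>

struct example_ops {
	int (*read_ltv)(void *hw, u16 rid, unsigned int len, void *buf);
	int (*read_ltv_pr)(void *hw, u16 rid, unsigned int len, void *buf);
};

static int local_read_ltv(void *hw, u16 rid, unsigned int len, void *buf)
{
	return 0;	/* MMIO register reads; never sleeps */
}

static int usb_read_ltv_poll(void *hw, u16 rid, unsigned int len, void *buf)
{
	return 0;	/* busy-polls the reply; safe in atomic context */
}

static int usb_read_ltv_sleep(void *hw, u16 rid, unsigned int len, void *buf)
{
	return 0;	/* sleeps on a completion; process context only */
}

static const struct example_ops local_ops = {
	.read_ltv	= local_read_ltv,
	.read_ltv_pr	= local_read_ltv,	/* same routine in both slots */
};

static const struct example_ops usb_ops = {
	.read_ltv	= usb_read_ltv_poll,
	.read_ltv_pr	= usb_read_ltv_sleep,
};
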
if (err) { dev_err(dev, "Failed to read short retry limit\n"); goto out; } - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT, &priv->long_retry_limit); if (err) { dev_err(dev, "Failed to read long retry limit\n"); goto out; } - err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME, + err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME, &priv->retry_lifetime); if (err) { dev_err(dev, "Failed to read max retry lifetime\n"); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index b849d27bd741..dd31929261ab 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -665,26 +665,43 @@ static void ezusb_request_in_callback(struct ezusb_priv *upriv, } /* switch */ } +typedef void (*ezusb_ctx_wait)(struct ezusb_priv *, struct request_context *); -static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, - struct request_context *ctx) +static void ezusb_req_ctx_wait_compl(struct ezusb_priv *upriv, + struct request_context *ctx) { switch (ctx->state) { case EZUSB_CTX_QUEUED: case EZUSB_CTX_REQ_SUBMITTED: case EZUSB_CTX_REQ_COMPLETE: case EZUSB_CTX_RESP_RECEIVED: - if (in_softirq()) { - /* If we get called from a timer, timeout timers don't - * get the chance to run themselves. So we make sure - * that we don't sleep for ever */ - int msecs = DEF_TIMEOUT * (1000 / HZ); - - while (!try_wait_for_completion(&ctx->done) && msecs--) - udelay(1000); - } else { - wait_for_completion(&ctx->done); - } + wait_for_completion(&ctx->done); + break; + default: + /* Done or failed - nothing to wait for */ + break; + } +} + +static void ezusb_req_ctx_wait_poll(struct ezusb_priv *upriv, + struct request_context *ctx) +{ + int msecs; + + switch (ctx->state) { + case EZUSB_CTX_QUEUED: + case EZUSB_CTX_REQ_SUBMITTED: + case EZUSB_CTX_REQ_COMPLETE: + case EZUSB_CTX_RESP_RECEIVED: + /* If we get called from a timer or with our lock acquired, then + * we can't wait for the completion and have to poll. This won't + * happen if the USB controller completes the URB requests in + * BH. 
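
The orinoco_usb rework that begins here replaces the old in_softirq() runtime guess with an explicit wait strategy chosen by each caller: ezusb_req_ctx_wait_compl() sleeps on the completion, while ezusb_req_ctx_wait_poll() busy-polls it for contexts that must not sleep. Distilled to its core, with hypothetical names:

#include <linux/completion.h>
#include <linux/delay.h>

typedef void (*example_wait_fn)(struct completion *done);

static void example_wait_sleep(struct completion *done)
{
	wait_for_completion(done);	/* process context: just sleep */
}

static void example_wait_poll(struct completion *done)
{
	int msecs = 1000;

	/* Atomic context: bounded busy-wait instead of sleeping. */
	while (!try_wait_for_completion(done) && msecs--)
		udelay(1000);
}

/* The request path no longer guesses its context; the caller decides. */
static void example_issue_request(struct completion *done,
				  example_wait_fn wait)
{
	/* ... submit URB ... */
	wait(done);
}
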
+ */ + msecs = DEF_TIMEOUT * (1000 / HZ); + + while (!try_wait_for_completion(&ctx->done) && msecs--) + udelay(1000); break; default: /* Done or failed - nothing to wait for */ @@ -692,6 +709,12 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, } } +static void ezusb_req_ctx_wait_skip(struct ezusb_priv *upriv, + struct request_context *ctx) +{ + WARN(1, "Shouldn't be invoked for in_rid\n"); +} + static inline u16 build_crc(struct ezusb_packet *data) { u16 crc = 0; @@ -853,14 +876,13 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv, static int ezusb_access_ltv(struct ezusb_priv *upriv, struct request_context *ctx, u16 length, const void *data, u16 frame_type, - void *ans_buff, unsigned ans_size, u16 *ans_length) + void *ans_buff, unsigned ans_size, u16 *ans_length, + ezusb_ctx_wait ezusb_ctx_wait_func) { int req_size; int retval = 0; enum ezusb_state state; - BUG_ON(in_irq()); - if (!upriv->udev) { retval = -ENODEV; goto exit; @@ -885,7 +907,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, spin_unlock_bh(&upriv->reply_count_lock); if (ctx->in_rid) - ezusb_req_ctx_wait(upriv, ctx); + ezusb_ctx_wait_func(upriv, ctx); state = ctx->state; switch (state) { @@ -946,8 +968,9 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, return retval; } -static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, - u16 length, const void *data) +static int __ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, + u16 length, const void *data, + ezusb_ctx_wait ezusb_ctx_wait_func) { struct ezusb_priv *upriv = hw->priv; u16 frame_type; @@ -973,11 +996,20 @@ static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, frame_type = EZUSB_FRAME_CONTROL; return ezusb_access_ltv(upriv, ctx, length, data, frame_type, - NULL, 0, NULL); + NULL, 0, NULL, ezusb_ctx_wait_func); } -static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, - unsigned bufsize, u16 *length, void *buf) +static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, + u16 length, const void *data) +{ + return __ezusb_write_ltv(hw, bap, rid, length, data, + ezusb_req_ctx_wait_poll); +} + +static int __ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, + unsigned bufsize, u16 *length, void *buf, + ezusb_ctx_wait ezusb_ctx_wait_func) + { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; @@ -990,34 +1022,33 @@ static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, - buf, bufsize, length); + buf, bufsize, length, ezusb_req_ctx_wait_poll); } -static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, - u16 parm2, struct hermes_response *resp) +static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, + unsigned bufsize, u16 *length, void *buf) { - struct ezusb_priv *upriv = hw->priv; - struct request_context *ctx; + return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf, + ezusb_req_ctx_wait_poll); +} - __le16 data[4] = { - cpu_to_le16(cmd), - cpu_to_le16(parm0), - cpu_to_le16(parm1), - cpu_to_le16(parm2), - }; - netdev_dbg(upriv->dev, - "0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X\n", cmd, - parm0, parm1, parm2); - ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); - if (!ctx) - return -ENOMEM; +static int ezusb_read_ltv_preempt(struct hermes *hw, int bap, u16 rid, + unsigned bufsize, u16 *length, void *buf) +{ + return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf, + ezusb_req_ctx_wait_compl); +} - return ezusb_access_ltv(upriv, ctx, 
sizeof(data), &data, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); +static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, + u16 parm2, struct hermes_response *resp) +{ + WARN_ON_ONCE(1); + return -EINVAL; } -static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, - struct hermes_response *resp) +static int __ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, + struct hermes_response *resp, + ezusb_ctx_wait ezusb_ctx_wait_func) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; @@ -1034,7 +1065,14 @@ static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); + EZUSB_FRAME_CONTROL, NULL, 0, NULL, + ezusb_ctx_wait_func); +} + +static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, + struct hermes_response *resp) +{ + return __ezusb_docmd_wait(hw, cmd, parm0, resp, ezusb_req_ctx_wait_poll); } static int ezusb_bap_pread(struct hermes *hw, int bap, @@ -1092,7 +1130,7 @@ static int ezusb_read_pda(struct hermes *hw, __le16 *pda, return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4, - NULL); + NULL, ezusb_req_ctx_wait_compl); } static int ezusb_program_init(struct hermes *hw, u32 entry_point) @@ -1106,7 +1144,8 @@ static int ezusb_program_init(struct hermes *hw, u32 entry_point) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); + EZUSB_FRAME_CONTROL, NULL, 0, NULL, + ezusb_req_ctx_wait_compl); } static int ezusb_program_end(struct hermes *hw) @@ -1119,7 +1158,8 @@ static int ezusb_program_end(struct hermes *hw) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); + EZUSB_FRAME_CONTROL, NULL, 0, NULL, + ezusb_req_ctx_wait_compl); } static int ezusb_program_bytes(struct hermes *hw, const char *buf, @@ -1135,7 +1175,8 @@ static int ezusb_program_bytes(struct hermes *hw, const char *buf, return -ENOMEM; err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); + EZUSB_FRAME_CONTROL, NULL, 0, NULL, + ezusb_req_ctx_wait_compl); if (err) return err; @@ -1144,7 +1185,8 @@ static int ezusb_program_bytes(struct hermes *hw, const char *buf, return -ENOMEM; return ezusb_access_ltv(upriv, ctx, len, buf, - EZUSB_FRAME_CONTROL, NULL, 0, NULL); + EZUSB_FRAME_CONTROL, NULL, 0, NULL, + ezusb_req_ctx_wait_compl); } static int ezusb_program(struct hermes *hw, const char *buf, @@ -1223,13 +1265,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->len < ETH_HLEN) goto drop; - ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); - if (!ctx) - goto busy; - - memset(ctx->buf, 0, BULK_BUF_SIZE); - buf = ctx->buf->data; - tx_control = 0; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, @@ -1237,6 +1272,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) if (err) goto drop; + ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); + if (!ctx) + goto drop; + + memset(ctx->buf, 0, BULK_BUF_SIZE); + buf = ctx->buf->data; + { __le16 *tx_cntl = (__le16 *)buf; *tx_cntl = cpu_to_le16(tx_control); @@ -1264,7 +1306,8 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) tx_size = ALIGN(buf - ctx->buf->data, 2); err = ezusb_access_ltv(upriv, ctx, tx_size, NULL, - EZUSB_FRAME_DATA, NULL, 0, NULL); + EZUSB_FRAME_DATA, NULL, 0, NULL, + ezusb_req_ctx_wait_skip); if (err) { 
netif_start_queue(dev); @@ -1349,7 +1392,6 @@ static int ezusb_init(struct hermes *hw) struct ezusb_priv *upriv = hw->priv; int retval; - BUG_ON(in_interrupt()); if (!upriv) return -EINVAL; @@ -1362,14 +1404,16 @@ static int ezusb_init(struct hermes *hw) usb_kill_urb(upriv->read_urb); ezusb_submit_in_urb(upriv); - retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1, - HERMES_BYTES_TO_RECLEN(2), "\x10\x00"); + retval = __ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1, + HERMES_BYTES_TO_RECLEN(2), "\x10\x00", + ezusb_req_ctx_wait_compl); if (retval < 0) { printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval); return retval; } - retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL); + retval = __ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL, + ezusb_req_ctx_wait_compl); if (retval < 0) { printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval); return retval; @@ -1448,7 +1492,6 @@ static inline void ezusb_delete(struct ezusb_priv *upriv) struct list_head *tmp_item; unsigned long flags; - BUG_ON(in_interrupt()); BUG_ON(!upriv); mutex_lock(&upriv->mtx); @@ -1533,6 +1576,7 @@ static const struct hermes_ops ezusb_ops = { .init_cmd_wait = ezusb_doicmd_wait, .allocate = ezusb_allocate, .read_ltv = ezusb_read_ltv, + .read_ltv_pr = ezusb_read_ltv_preempt, .write_ltv = ezusb_write_ltv, .bap_pread = ezusb_bap_pread, .read_pda = ezusb_read_pda, diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c index 2076f449b6e2..5e5ceafe098b 100644 --- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c +++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c @@ -54,7 +54,7 @@ static const unsigned char scan_rate_list[] = { 2, 4, 11, 22, /** * prism54_mib_mode_helper - MIB change mode helper function - * @mib: the &struct islpci_mib object to modify + * @priv: the &struct islpci_private object to modify * @iw_mode: new mode (%IW_MODE_*) * * This is a helper function, hence it does not lock. Make sure @@ -114,14 +114,13 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode) return 0; } -/** +/* * prism54_mib_init - fill MIB cache with defaults * * this function initializes the struct given as @mib with defaults, * of which many are retrieved from the global module parameter * variables. */ - void prism54_mib_init(islpci_private *priv) { diff --git a/drivers/net/wireless/marvell/mwifiex/Makefile b/drivers/net/wireless/marvell/mwifiex/Makefile index fdfd9bf15ed4..162d557b78af 100644 --- a/drivers/net/wireless/marvell/mwifiex/Makefile +++ b/drivers/net/wireless/marvell/mwifiex/Makefile @@ -1,8 +1,8 @@ # -# Copyright (C) 2011-2014, Marvell International Ltd. +# Copyright 2011-2020 NXP # -# This software file (the "File") is distributed by Marvell International -# Ltd. under the terms of the GNU General Public License Version 2, June 1991 +# This software file (the "File") is distributed by NXP +# under the terms of the GNU General Public License Version 2, June 1991 # (the "License"). You may use, redistribute and/or modify this File in # accordance with the terms and conditions of the License, a copy of which # is available by writing to the Free Software Foundation, Inc., diff --git a/drivers/net/wireless/marvell/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README index 588fcbe38374..5e39248b73b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/README +++ b/drivers/net/wireless/marvell/mwifiex/README @@ -1,7 +1,8 @@ -# Copyright (C) 2011-2014, Marvell International Ltd. 
# -# This software file (the "File") is distributed by Marvell International -# Ltd. under the terms of the GNU General Public License Version 2, June 1991 +# Copyright 2011-2020 NXP +# +# This software file (the "File") is distributed by NXP +# under the terms of the GNU General Public License Version 2, June 1991 # (the "License"). You may use, redistribute and/or modify this File in # accordance with the terms and conditions of the License, a copy of which # is available by writing to the Free Software Foundation, Inc., diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 5934f7147547..173ccf79cbfc 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -877,6 +877,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv, memset(adhoc_start->ssid, 0, IEEE80211_MAX_SSID_LEN); + if (req_ssid->ssid_len > IEEE80211_MAX_SSID_LEN) + req_ssid->ssid_len = IEEE80211_MAX_SSID_LEN; memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len); mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n", diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 9ba8a8f64976..ee52fb839ef7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1455,7 +1455,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) } /* - * This function gets called during PCIe function level reset. + * This function can be used for shutting down the adapter SW. */ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) { @@ -1471,6 +1471,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); mwifiex_deauthenticate(priv, NULL); + mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN); + mwifiex_uninit_sw(adapter); adapter->is_up = false; @@ -1481,7 +1483,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); -/* This function gets called during PCIe function level reset. Required +/* This function can be used for reinitting the adapter SW. 
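
The join.c hunk above clamps a caller-supplied SSID length before the memcpy() into a fixed IEEE80211_MAX_SSID_LEN buffer, closing a potential overflow. The generic shape of the fix, as a hypothetical helper:

#include <linux/string.h>
#include <linux/types.h>

#define EXAMPLE_MAX_SSID_LEN 32		/* mirrors IEEE80211_MAX_SSID_LEN */

/* Never trust a length that travels alongside a pointer. */
static void example_copy_ssid(u8 dst[EXAMPLE_MAX_SSID_LEN],
			      const u8 *ssid, size_t ssid_len)
{
	if (ssid_len > EXAMPLE_MAX_SSID_LEN)
		ssid_len = EXAMPLE_MAX_SSID_LEN;

	memset(dst, 0, EXAMPLE_MAX_SSID_LEN);
	memcpy(dst, ssid, ssid_len);
}
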
Required * code is extracted from mwifiex_add_card() */ int diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 6a10ff0377a2..5f0a61b974ee 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -429,7 +429,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) struct mwifiex_private *priv; const struct mwifiex_pcie_card_reg *reg; u32 fw_status; - int ret; card = pci_get_drvdata(pdev); @@ -441,7 +440,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev) reg = card->pcie.reg; if (reg) - ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + mwifiex_read_reg(adapter, reg->fw_status, &fw_status); else fw_status = -1; @@ -526,6 +525,8 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev) clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags); clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags); mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); + + card->pci_reset_ongoing = true; } /* @@ -554,6 +555,8 @@ static void mwifiex_pcie_reset_done(struct pci_dev *pdev) dev_err(&pdev->dev, "reinit failed: %d\n", ret); else mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); + + card->pci_reset_ongoing = false; } static const struct pci_error_handlers mwifiex_pcie_err_handler = { @@ -3139,12 +3142,23 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; - int ret; u32 fw_status; - cancel_work_sync(&card->work); + /* Perform the cancel_work_sync() only when we're not resetting + * the card. This is because that function never returns if we're + * in the reset path. If we're here when resetting the card, it means + * that we failed to reset the card (reset failure path).
+ */ + if (!card->pci_reset_ongoing) { + mwifiex_dbg(adapter, MSG, "performing cancel_work_sync()...\n"); + cancel_work_sync(&card->work); + mwifiex_dbg(adapter, MSG, "cancel_work_sync() done\n"); + } else { + mwifiex_dbg(adapter, MSG, + "skipped cancel_work_sync() because we're in card reset failure path\n"); + } - ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status); + mwifiex_read_reg(adapter, reg->fw_status, &fw_status); if (fw_status == FIRMWARE_READY_PCIE) { mwifiex_dbg(adapter, INFO, "Clearing driver ready signature\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 843d57eda820..5ed613d65709 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -242,6 +242,8 @@ struct pcie_service_card { struct mwifiex_msix_context share_irq_ctx; struct work_struct work; unsigned long work_flags; + + bool pci_reset_ongoing; }; static inline int diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index dec534a6ddb1..5648512c9300 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -43,8 +43,6 @@ #define BLOCK_MODE 1 #define BYTE_MODE 0 -#define REG_PORT 0 - #define MWIFIEX_SDIO_IO_PORT_MASK 0xfffff #define MWIFIEX_SDIO_BYTE_MODE_MASK 0x80000000 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 119ccacd1fcc..6b5d35d9e69f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -201,6 +201,7 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv, mwifiex_dbg(priv->adapter, INFO, "info: SNMP_RESP: DTIM period=%u\n", ul_temp); + break; default: break; } @@ -1393,6 +1394,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_TDLS_OPER: ret = mwifiex_ret_tdls_oper(priv, resp); + break; case HostCmd_CMD_MC_POLICY: break; case HostCmd_CMD_CHAN_REPORT_REQUEST: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index bc79ca4cb803..68c63268e2e6 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -99,6 +99,7 @@ static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv, case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895: sta_ptr->max_amsdu = MWIFIEX_TX_DATA_BUF_SIZE_4K; + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index b48a85d791f6..18e89777b784 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -108,6 +108,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv, if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2) bss_config->wpa_cfg.pairwise_cipher_wpa2 |= CIPHER_AES_CCMP; + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index b8f19ca73414..0b375608df7d 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -1396,6 +1396,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, break; case 0: mwifiex_write_data_complete(adapter, skb, 0, ret); + break; default: break; } diff --git a/drivers/net/wireless/marvell/mwl8k.c 
b/drivers/net/wireless/marvell/mwl8k.c index 23efd7075df6..abf3b0233ccc 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -605,8 +605,9 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length) dma_addr_t dma_addr; int loops; - dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, dma_addr)) + dma_addr = dma_map_single(&priv->pdev->dev, data, length, + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, dma_addr)) return -ENOMEM; iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR); @@ -635,7 +636,7 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length) udelay(1); } while (--loops); - pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, dma_addr, length, DMA_TO_DEVICE); return loops ? 0 : -ETIMEDOUT; } @@ -1169,7 +1170,8 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size; - rxq->rxd = pci_zalloc_consistent(priv->pdev, size, &rxq->rxd_dma); + rxq->rxd = dma_alloc_coherent(&priv->pdev->dev, size, &rxq->rxd_dma, + GFP_KERNEL); if (rxq->rxd == NULL) { wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n"); return -ENOMEM; @@ -1177,7 +1179,8 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { - pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); + dma_free_coherent(&priv->pdev->dev, size, rxq->rxd, + rxq->rxd_dma); return -ENOMEM; } @@ -1218,7 +1221,7 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit) if (skb == NULL) break; - addr = pci_map_single(priv->pdev, skb->data, + addr = dma_map_single(&priv->pdev->dev, skb->data, MWL8K_RX_MAXSZ, DMA_FROM_DEVICE); rxq->rxd_count++; @@ -1249,9 +1252,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) for (i = 0; i < MWL8K_RX_DESCS; i++) { if (rxq->buf[i].skb != NULL) { - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, dma_unmap_addr(&rxq->buf[i], dma), - MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); + MWL8K_RX_MAXSZ, DMA_FROM_DEVICE); dma_unmap_addr_set(&rxq->buf[i], dma, 0); kfree_skb(rxq->buf[i].skb); @@ -1262,9 +1265,9 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index) kfree(rxq->buf); rxq->buf = NULL; - pci_free_consistent(priv->pdev, - MWL8K_RX_DESCS * priv->rxd_ops->rxd_size, - rxq->rxd, rxq->rxd_dma); + dma_free_coherent(&priv->pdev->dev, + MWL8K_RX_DESCS * priv->rxd_ops->rxd_size, rxq->rxd, + rxq->rxd_dma); rxq->rxd = NULL; } @@ -1343,9 +1346,9 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) rxq->buf[rxq->head].skb = NULL; - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, dma_unmap_addr(&rxq->buf[rxq->head], dma), - MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); + MWL8K_RX_MAXSZ, DMA_FROM_DEVICE); dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); rxq->head++; @@ -1460,7 +1463,8 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); - txq->txd = pci_zalloc_consistent(priv->pdev, size, &txq->txd_dma); + txq->txd = dma_alloc_coherent(&priv->pdev->dev, size, &txq->txd_dma, + GFP_KERNEL); if (txq->txd == NULL) { wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n"); return -ENOMEM; @@ -1468,7 +1472,8 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), 
GFP_KERNEL); if (txq->skb == NULL) { - pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); + dma_free_coherent(&priv->pdev->dev, size, txq->txd, + txq->txd_dma); return -ENOMEM; } @@ -1707,7 +1712,7 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force) txq->skb[tx] = NULL; BUG_ON(skb == NULL); - pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, addr, size, DMA_TO_DEVICE); mwl8k_remove_dma_header(skb, tx_desc->qos_control); @@ -1774,9 +1779,9 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index) kfree(txq->skb); txq->skb = NULL; - pci_free_consistent(priv->pdev, - MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), - txq->txd, txq->txd_dma); + dma_free_coherent(&priv->pdev->dev, + MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), + txq->txd, txq->txd_dma); txq->txd = NULL; } @@ -2041,10 +2046,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, qos |= MWL8K_QOS_ACK_POLICY_NORMAL; } - dma = pci_map_single(priv->pdev, skb->data, - skb->len, PCI_DMA_TODEVICE); + dma = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); - if (pci_dma_mapping_error(priv->pdev, dma)) { + if (dma_mapping_error(&priv->pdev->dev, dma)) { wiphy_debug(hw->wiphy, "failed to dma map skb, dropping TX frame.\n"); if (start_ba_session) { @@ -2077,8 +2082,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, } mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); - pci_unmap_single(priv->pdev, dma, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, dma, skb->len, + DMA_TO_DEVICE); dev_kfree_skb(skb); return; } @@ -2237,9 +2242,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) cmd->result = (__force __le16) 0xffff; dma_size = le16_to_cpu(cmd->length); - dma_addr = pci_map_single(priv->pdev, cmd, dma_size, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(priv->pdev, dma_addr)) { + dma_addr = dma_map_single(&priv->pdev->dev, cmd, dma_size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&priv->pdev->dev, dma_addr)) { rc = -ENOMEM; goto exit; } @@ -2257,8 +2262,8 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd) priv->hostcmd_wait = NULL; - pci_unmap_single(priv->pdev, dma_addr, dma_size, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&priv->pdev->dev, dma_addr, dma_size, + DMA_BIDIRECTIONAL); if (!timeout) { wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n", @@ -6126,7 +6131,8 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) tasklet_disable(&priv->poll_rx_task); /* Power management cookie */ - priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); + priv->cookie = dma_alloc_coherent(&priv->pdev->dev, 4, + &priv->cookie_dma, GFP_KERNEL); if (priv->cookie == NULL) return -ENOMEM; @@ -6174,8 +6180,8 @@ err_unprobe_hw: err_free_cookie: if (priv->cookie != NULL) - pci_free_consistent(priv->pdev, 4, - priv->cookie, priv->cookie_dma); + dma_free_coherent(&priv->pdev->dev, 4, priv->cookie, + priv->cookie_dma); return rc; } @@ -6338,7 +6344,7 @@ static void mwl8k_remove(struct pci_dev *pdev) mwl8k_rxq_deinit(hw, 0); - pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); + dma_free_coherent(&priv->pdev->dev, 4, priv->cookie, priv->cookie_dma); unmap: pci_iounmap(pdev, priv->regs); diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index 52f583cb1418..d4a6b8108971 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ 
b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -30,8 +30,8 @@ int mt76_queues_read(struct seq_file *s, void *data) struct mt76_dev *dev = dev_get_drvdata(s->private); int i; - for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { - struct mt76_queue *q = dev->q_tx[i]; + for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) { + struct mt76_queue *q = dev->phy.q_tx[i]; if (!q) continue; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 214fc95b8a33..73eeb00d5aa6 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -72,9 +72,11 @@ mt76_free_pending_txwi(struct mt76_dev *dev) { struct mt76_txwi_cache *t; + local_bh_disable(); while ((t = __mt76_get_txwi(dev)) != NULL) dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); + local_bh_enable(); } static int @@ -86,6 +88,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, int i; spin_lock_init(&q->lock); + spin_lock_init(&q->cleanup_lock); q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; q->ndesc = n_desc; @@ -215,16 +218,15 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) } static void -mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) +mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) { - struct mt76_queue *q = dev->q_tx[qid]; struct mt76_queue_entry entry; - bool wake = false; int last; if (!q) return; + spin_lock_bh(&q->cleanup_lock); if (flush) last = -1; else @@ -237,13 +239,13 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) if (entry.txwi) { if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE)) mt76_put_txwi(dev, entry.txwi); - wake = !flush; } if (!flush && q->tail == last) last = readl(&q->regs->dma_idx); } + spin_unlock_bh(&q->cleanup_lock); if (flush) { spin_lock_bh(&q->lock); @@ -252,16 +254,8 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) spin_unlock_bh(&q->lock); } - wake = wake && q->stopped && - qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; - if (wake) - q->stopped = false; - if (!q->queued) wake_up(&dev->tx_wait); - - if (wake) - ieee80211_wake_queue(dev->hw, qid); } static void * @@ -312,10 +306,9 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, } static int -mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info) { - struct mt76_queue *q = dev->q_tx[qid]; struct mt76_queue_buf buf; dma_addr_t addr; @@ -343,11 +336,10 @@ error: } static int -mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { - struct mt76_queue *q = dev->q_tx[qid]; struct mt76_tx_info tx_info = { .skb = skb, }; @@ -397,7 +389,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); - ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info); + ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info); dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size, DMA_TO_DEVICE); if (ret < 0) @@ -661,8 +653,15 @@ void mt76_dma_cleanup(struct mt76_dev *dev) mt76_worker_disable(&dev->tx_worker); netif_napi_del(&dev->tx_napi); - for (i = 0; i < ARRAY_SIZE(dev->q_tx); 
i++) - mt76_dma_tx_cleanup(dev, i, true); + + for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) { + mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true); + if (dev->phy2) + mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true); + } + + for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++) + mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true); mt76_for_each_q_rx(dev, i) { netif_napi_del(&dev->napi[i]); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 3044e0069991..90278aeb6721 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -88,8 +88,10 @@ out_put_node: } void -mt76_eeprom_override(struct mt76_dev *dev) +mt76_eeprom_override(struct mt76_phy *phy) { + struct mt76_dev *dev = phy->dev; + #ifdef CONFIG_OF struct device_node *np = dev->dev->of_node; const u8 *mac = NULL; @@ -97,14 +99,14 @@ mt76_eeprom_override(struct mt76_dev *dev) if (np) mac = of_get_mac_address(np); if (!IS_ERR_OR_NULL(mac)) - ether_addr_copy(dev->macaddr, mac); + ether_addr_copy(phy->macaddr, mac); #endif - if (!is_valid_ether_addr(dev->macaddr)) { - eth_random_addr(dev->macaddr); + if (!is_valid_ether_addr(phy->macaddr)) { + eth_random_addr(phy->macaddr); dev_info(dev->dev, "Invalid MAC address, using random address %pM\n", - dev->macaddr); + phy->macaddr); } } EXPORT_SYMBOL_GPL(mt76_eeprom_override); diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 4befe7f937a9..a840396f2c74 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -159,21 +159,22 @@ static void mt76_init_stream_cap(struct mt76_phy *phy, void mt76_set_stream_caps(struct mt76_phy *phy, bool vht) { - if (phy->dev->cap.has_2ghz) + if (phy->cap.has_2ghz) mt76_init_stream_cap(phy, &phy->sband_2g.sband, false); - if (phy->dev->cap.has_5ghz) + if (phy->cap.has_5ghz) mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht); } EXPORT_SYMBOL_GPL(mt76_set_stream_caps); static int -mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, +mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband, const struct ieee80211_channel *chan, int n_chan, struct ieee80211_rate *rates, int n_rates, bool vht) { struct ieee80211_supported_band *sband = &msband->sband; - struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_sta_ht_cap *ht_cap; + struct mt76_dev *dev = phy->dev; void *chanlist; int size; @@ -203,7 +204,7 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - mt76_init_stream_cap(&dev->phy, sband, vht); + mt76_init_stream_cap(phy, sband, vht); if (!vht) return 0; @@ -221,27 +222,25 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, } static int -mt76_init_sband_2g(struct mt76_dev *dev, struct ieee80211_rate *rates, +mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates, int n_rates) { - dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = &dev->phy.sband_2g.sband; + phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband; - return mt76_init_sband(dev, &dev->phy.sband_2g, - mt76_channels_2ghz, - ARRAY_SIZE(mt76_channels_2ghz), - rates, n_rates, false); + return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz, + ARRAY_SIZE(mt76_channels_2ghz), rates, + n_rates, false); } static int -mt76_init_sband_5g(struct mt76_dev *dev, struct ieee80211_rate 
*rates, +mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates, int n_rates, bool vht) { - dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = &dev->phy.sband_5g.sband; + phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband; - return mt76_init_sband(dev, &dev->phy.sband_5g, - mt76_channels_5ghz, - ARRAY_SIZE(mt76_channels_5ghz), - rates, n_rates, vht); + return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz, + ARRAY_SIZE(mt76_channels_5ghz), rates, + n_rates, vht); } static void @@ -274,12 +273,13 @@ mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband, } static void -mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) +mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw) { + struct mt76_dev *dev = phy->dev; struct wiphy *wiphy = hw->wiphy; SET_IEEE80211_DEV(hw, dev->dev); - SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); + SET_IEEE80211_PERM_ADDR(hw, phy->macaddr); wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | @@ -305,6 +305,7 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) { ieee80211_hw_set(hw, TX_AMSDU); @@ -314,7 +315,6 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy->interface_modes = @@ -333,65 +333,57 @@ mt76_alloc_phy(struct mt76_dev *dev, unsigned int size, const struct ieee80211_ops *ops) { struct ieee80211_hw *hw; + unsigned int phy_size; struct mt76_phy *phy; - unsigned int phy_size, chan_size; - unsigned int size_2g, size_5g; - void *priv; phy_size = ALIGN(sizeof(*phy), 8); - chan_size = sizeof(dev->phy.sband_2g.chan[0]); - size_2g = ALIGN(ARRAY_SIZE(mt76_channels_2ghz) * chan_size, 8); - size_5g = ALIGN(ARRAY_SIZE(mt76_channels_5ghz) * chan_size, 8); - - size += phy_size + size_2g + size_5g; - hw = ieee80211_alloc_hw(size, ops); + hw = ieee80211_alloc_hw(size + phy_size, ops); if (!hw) return NULL; phy = hw->priv; phy->dev = dev; phy->hw = hw; + phy->priv = hw->priv + phy_size; - mt76_phy_init(dev, hw); - - priv = hw->priv + phy_size; + return phy; +} +EXPORT_SYMBOL_GPL(mt76_alloc_phy); - phy->sband_2g = dev->phy.sband_2g; - phy->sband_2g.chan = priv; - priv += size_2g; +int mt76_register_phy(struct mt76_phy *phy, bool vht, + struct ieee80211_rate *rates, int n_rates) +{ + int ret; - phy->sband_5g = dev->phy.sband_5g; - phy->sband_5g.chan = priv; - priv += size_5g; + mt76_phy_init(phy, phy->hw); - phy->priv = priv; + if (phy->cap.has_2ghz) { + ret = mt76_init_sband_2g(phy, rates, n_rates); + if (ret) + return ret; + } - hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband; - hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband; + if (phy->cap.has_5ghz) { + ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht); + if (ret) + return ret; + } + wiphy_read_of_freq_limits(phy->hw->wiphy); mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ); mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ); - return phy; -} -EXPORT_SYMBOL_GPL(mt76_alloc_phy); - -int -mt76_register_phy(struct mt76_phy *phy) -{ - int ret; - ret = ieee80211_register_hw(phy->hw); if (ret) return ret; phy->dev->phy2 = phy; + 
return 0; } EXPORT_SYMBOL_GPL(mt76_register_phy); -void -mt76_unregister_phy(struct mt76_phy *phy) +void mt76_unregister_phy(struct mt76_phy *phy) { struct mt76_dev *dev = phy->dev; @@ -459,16 +451,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, int ret; dev_set_drvdata(dev->dev, dev); - mt76_phy_init(dev, hw); + mt76_phy_init(phy, hw); - if (dev->cap.has_2ghz) { - ret = mt76_init_sband_2g(dev, rates, n_rates); + if (phy->cap.has_2ghz) { + ret = mt76_init_sband_2g(phy, rates, n_rates); if (ret) return ret; } - if (dev->cap.has_5ghz) { - ret = mt76_init_sband_5g(dev, rates + 4, n_rates - 4, vht); + if (phy->cap.has_5ghz) { + ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht); if (ret) return ret; } @@ -539,14 +531,11 @@ EXPORT_SYMBOL_GPL(mt76_rx); bool mt76_has_tx_pending(struct mt76_phy *phy) { - struct mt76_dev *dev = phy->dev; struct mt76_queue *q; - int i, offset; - - offset = __MT_TXQ_MAX * (phy != &dev->phy); + int i; for (i = 0; i < __MT_TXQ_MAX; i++) { - q = dev->q_tx[offset + i]; + q = phy->q_tx[i]; if (q && q->queued) return true; } @@ -842,7 +831,7 @@ mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb) return; if (!wcid || !wcid->sta) { - if (!ether_addr_equal(hdr->addr1, dev->macaddr)) + if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr)) return; wcid = NULL; @@ -932,7 +921,8 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, { struct ieee80211_sta *sta; struct ieee80211_hw *hw; - struct sk_buff *skb; + struct sk_buff *skb, *tmp; + LIST_HEAD(list); spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { @@ -942,9 +932,19 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, } mt76_rx_convert(dev, skb, &hw, &sta); - ieee80211_rx_napi(hw, sta, skb, napi); + ieee80211_rx_list(hw, sta, skb, &list); } spin_unlock(&dev->rx_lock); + + if (!napi) { + netif_receive_skb_list(&list); + return; + } + + list_for_each_entry_safe(skb, tmp, &list, list) { + skb_list_del_init(skb); + napi_gro_receive(napi, skb); + } } void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, @@ -1202,3 +1202,22 @@ int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) return 0; } EXPORT_SYMBOL_GPL(mt76_get_antenna); + +struct mt76_queue * +mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, + int ring_base) +{ + struct mt76_queue *hwq; + int err; + + hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL); + if (!hwq) + return ERR_PTR(-ENOMEM); + + err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base); + if (err < 0) + return ERR_PTR(err); + + return hwq; +} +EXPORT_SYMBOL_GPL(mt76_init_queue); diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c index ade61a5334c6..d3a5e2c4f12a 100644 --- a/drivers/net/wireless/mediatek/mt76/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -50,3 +50,83 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb) wake_up(&dev->mcu.wait); } EXPORT_SYMBOL_GPL(mt76_mcu_rx_event); + +int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data, + int len, bool wait_resp, struct sk_buff **ret_skb) +{ + struct sk_buff *skb; + + if (dev->mcu_ops->mcu_send_msg) + return dev->mcu_ops->mcu_send_msg(dev, cmd, data, len, wait_resp); + + skb = mt76_mcu_msg_alloc(dev, data, len); + if (!skb) + return -ENOMEM; + + return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, ret_skb); +} +EXPORT_SYMBOL_GPL(mt76_mcu_send_and_get_msg); + +int 
mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp, + struct sk_buff **ret_skb) +{ + unsigned long expires; + int ret, seq; + + if (ret_skb) + *ret_skb = NULL; + + mutex_lock(&dev->mcu.mutex); + + ret = dev->mcu_ops->mcu_skb_send_msg(dev, skb, cmd, &seq); + if (ret < 0) + goto out; + + if (!wait_resp) { + ret = 0; + goto out; + } + + expires = jiffies + dev->mcu.timeout; + + do { + skb = mt76_mcu_get_response(dev, expires); + ret = dev->mcu_ops->mcu_parse_response(dev, cmd, skb, seq); + if (!ret && ret_skb) + *ret_skb = skb; + else + dev_kfree_skb(skb); + } while (ret == -EAGAIN); + +out: + mutex_unlock(&dev->mcu.mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(mt76_mcu_skb_send_and_get_msg); + +int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, + int len) +{ + int err, cur_len; + + while (len > 0) { + cur_len = min_t(int, 4096 - dev->mcu_ops->headroom, len); + + err = mt76_mcu_send_msg(dev, cmd, data, cur_len, false); + if (err) + return err; + + data += cur_len; + len -= cur_len; + + if (dev->queue_ops->tx_cleanup) + dev->queue_ops->tx_cleanup(dev, + dev->q_mcu[MT_MCUQ_FWDL], + false); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_mcu_send_firmware); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index a5be66de1cff..3e496a188bf0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -64,18 +64,23 @@ enum mt76_txq_id { MT_TXQ_BE = IEEE80211_AC_BE, MT_TXQ_BK = IEEE80211_AC_BK, MT_TXQ_PSD, - MT_TXQ_MCU, - MT_TXQ_MCU_WA, MT_TXQ_BEACON, MT_TXQ_CAB, - MT_TXQ_FWDL, __MT_TXQ_MAX }; +enum mt76_mcuq_id { + MT_MCUQ_WM, + MT_MCUQ_WA, + MT_MCUQ_FWDL, + __MT_MCUQ_MAX +}; + enum mt76_rxq_id { MT_RXQ_MAIN, MT_RXQ_MCU, MT_RXQ_MCU_WA, + MT_RXQ_EXT, __MT_RXQ_MAX }; @@ -121,6 +126,7 @@ struct mt76_queue { struct mt76_queue_regs __iomem *regs; spinlock_t lock; + spinlock_t cleanup_lock; struct mt76_queue_entry *entry; struct mt76_desc *desc; @@ -131,9 +137,11 @@ struct mt76_queue { int queued; int buf_size; bool stopped; + bool blocked; u8 buf_offset; u8 hw_idx; + u8 qid; dma_addr_t desc_dma; struct sk_buff *rx_head; @@ -147,7 +155,9 @@ struct mt76_mcu_ops { int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data, int len, bool wait_resp); int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, - int cmd, bool wait_resp); + int cmd, int *seq); + int (*mcu_parse_response)(struct mt76_dev *dev, int cmd, + struct sk_buff *skb, int seq); u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset); void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val); int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, @@ -164,11 +174,11 @@ struct mt76_queue_ops { int idx, int n_desc, int bufsize, u32 ring_base); - int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid, + int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta); - int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid, + int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info); void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, @@ -176,7 +186,7 @@ struct mt76_queue_ops { void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid); - void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid, + void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q, bool flush); void (*kick)(struct mt76_dev *dev, struct 
mt76_queue *q); @@ -185,6 +195,7 @@ struct mt76_queue_ops { enum mt76_wcid_flags { MT_WCID_FLAG_CHECK_PS, MT_WCID_FLAG_PS, + MT_WCID_FLAG_4ADDR, }; #define MT76_N_WCIDS 288 @@ -411,6 +422,7 @@ enum mt76u_out_ep { struct mt76_mcu { struct mutex mutex; u32 msg_seq; + int timeout; struct sk_buff_head res_q; wait_queue_head_t wait; @@ -426,7 +438,9 @@ struct mt76_usb { u8 *data; u16 data_len; - struct tasklet_struct rx_tasklet; + struct mt76_worker status_worker; + struct mt76_worker rx_worker; + struct work_struct stat_work; u8 out_ep[__MT_EP_OUT_MAX]; @@ -445,25 +459,18 @@ struct mt76_usb { #define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE) struct mt76_sdio { - struct workqueue_struct *txrx_wq; - struct { - struct work_struct xmit_work; - struct work_struct status_work; - } tx; - struct { - struct work_struct recv_work; - struct work_struct net_work; - } rx; + struct mt76_worker txrx_worker; + struct mt76_worker status_worker; + struct mt76_worker net_worker; struct work_struct stat_work; - u8 *xmit_buf[MT_TXQ_MCU_WA]; + u8 *xmit_buf[IEEE80211_NUM_ACS + 2]; struct sdio_func *func; void *intr_data; struct { - struct mutex lock; int pse_data_quota; int ple_data_quota; int pse_mcu_quota; @@ -528,6 +535,8 @@ struct mt76_testmode_data { u8 tx_rate_nss; u8 tx_rate_sgi; u8 tx_rate_ldpc; + u8 tx_rate_stbc; + u8 tx_ltf; u8 tx_antenna_mask; @@ -555,15 +564,20 @@ struct mt76_phy { unsigned long state; + struct mt76_queue *q_tx[__MT_TXQ_MAX]; + struct cfg80211_chan_def chandef; struct ieee80211_channel *main_chan; struct mt76_channel_state *chan_state; ktime_t survey_time; + struct mt76_hw_cap cap; struct mt76_sband sband_2g; struct mt76_sband sband_5g; + u8 macaddr[ETH_ALEN]; + u32 vif_mask; int txpower_cur; @@ -601,7 +615,7 @@ struct mt76_dev { struct sk_buff_head rx_skb[__MT_RXQ_MAX]; struct list_head txwi_cache; - struct mt76_queue *q_tx[2 * __MT_TXQ_MAX]; + struct mt76_queue *q_mcu[__MT_MCUQ_MAX]; struct mt76_queue q_rx[__MT_RXQ_MAX]; const struct mt76_queue_ops *queue_ops; int tx_dma_idx[4]; @@ -619,7 +633,6 @@ struct mt76_dev { struct mt76_wcid global_wcid; struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; - u8 macaddr[ETH_ALEN]; u32 rev; u32 aggr_stats[32]; @@ -630,7 +643,6 @@ struct mt76_dev { struct debugfs_blob_wrapper eeprom; struct debugfs_blob_wrapper otp; - struct mt76_hw_cap cap; struct mt76_rate_power rate_power; @@ -690,10 +702,7 @@ enum mt76_phy_type { #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) -#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__) -#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__) -#define __mt76_mcu_skb_send_msg(dev, ...) (dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__) #define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76)) #define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev)) @@ -752,7 +761,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) -#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) +#define mt76_queue_tx_cleanup(dev, ...) 
(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) #define mt76_for_each_q_rx(dev, i) \ @@ -770,7 +779,8 @@ void mt76_unregister_phy(struct mt76_phy *phy); struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size, const struct ieee80211_ops *ops); -int mt76_register_phy(struct mt76_phy *phy); +int mt76_register_phy(struct mt76_phy *phy, bool vht, + struct ieee80211_rate *rates, int n_rates); struct dentry *mt76_register_debugfs(struct mt76_dev *dev); int mt76_queues_read(struct seq_file *s, void *data); @@ -778,7 +788,40 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len); int mt76_eeprom_init(struct mt76_dev *dev, int len); -void mt76_eeprom_override(struct mt76_dev *dev); +void mt76_eeprom_override(struct mt76_phy *phy); + +struct mt76_queue * +mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, + int ring_base); +static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx, + int n_desc, int ring_base) +{ + struct mt76_queue *q; + + q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base); + if (IS_ERR(q)) + return PTR_ERR(q); + + q->qid = qid; + phy->q_tx[qid] = q; + + return 0; +} + +static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx, + int n_desc, int ring_base) +{ + struct mt76_queue *q; + + q = mt76_init_queue(dev, qid, idx, n_desc, ring_base); + if (IS_ERR(q)) + return PTR_ERR(q); + + q->qid = __MT_TXQ_MAX + qid; + dev->q_mcu[qid] = q; + + return 0; +} static inline struct mt76_phy * mt76_dev_phy(struct mt76_dev *dev, bool phy_ext) @@ -901,7 +944,7 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb); void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq); -void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, +void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta, bool send_bar); void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb); void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid); @@ -1064,7 +1107,6 @@ void mt76u_queues_deinit(struct mt76_dev *dev); int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, const struct mt76_bus_ops *bus_ops); int mt76s_alloc_queues(struct mt76_dev *dev); -void mt76s_stop_txrx(struct mt76_dev *dev); void mt76s_deinit(struct mt76_dev *dev); struct sk_buff * @@ -1073,6 +1115,25 @@ mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb); struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, unsigned long expires); +int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data, + int len, bool wait_resp, struct sk_buff **ret); +int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb, + int cmd, bool wait_resp, struct sk_buff **ret); +int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, + int len); +static inline int +mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len, + bool wait_resp) +{ + return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL); +} + +static inline int +mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, + bool wait_resp) +{ + return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL); +} 
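/*
 * A minimal usage sketch, not part of the patch above: after this
 * refactor, MCU rings are allocated via mt76_init_mcu_queue() into
 * dev->q_mcu[] and commands go through the shared mt76_mcu_send_msg() /
 * mt76_mcu_send_and_get_msg() wrappers, which assume the driver has
 * filled in mcu_ops->mcu_skb_send_msg and mcu_ops->mcu_parse_response.
 * The EXAMPLE_* constants and the request layout are invented for
 * illustration only.
 */
#define EXAMPLE_MCUQ_HW_IDX	15
#define EXAMPLE_MCU_RING_SIZE	128
#define EXAMPLE_RING_BASE	0x0
#define EXAMPLE_CMD_QUERY	0x01

static int example_mcu_setup_and_query(struct mt76_dev *dev)
{
	struct {
		__le32 index;
	} req = {
		.index = cpu_to_le32(0),
	};
	struct sk_buff *resp;
	int ret;

	/* MCU rings now live in dev->q_mcu[], separate from phy->q_tx[] */
	ret = mt76_init_mcu_queue(dev, MT_MCUQ_WM, EXAMPLE_MCUQ_HW_IDX,
				  EXAMPLE_MCU_RING_SIZE, EXAMPLE_RING_BASE);
	if (ret)
		return ret;

	/* fire-and-forget: wait_resp=false returns once the command is queued */
	ret = mt76_mcu_send_msg(dev, EXAMPLE_CMD_QUERY, &req, sizeof(req),
				false);
	if (ret)
		return ret;

	/* same command, but wait for and hand back the response skb */
	ret = mt76_mcu_send_and_get_msg(dev, EXAMPLE_CMD_QUERY, &req,
					sizeof(req), true, &resp);
	if (ret)
		return ret;

	dev_kfree_skb(resp);
	return 0;
}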
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c index d728c5e43783..5d4522f440b7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -13,23 +13,25 @@ static void mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt7603_dev *dev = (struct mt7603_dev *)priv; + struct mt76_dev *mdev = &dev->mt76; struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; struct sk_buff *skb = NULL; - if (!(dev->mt76.beacon_mask & BIT(mvif->idx))) + if (!(mdev->beacon_mask & BIT(mvif->idx))) return; skb = ieee80211_beacon_get(mt76_hw(dev), vif); if (!skb) return; - mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL); + mt76_tx_queue_skb(dev, dev->mphy.q_tx[MT_TXQ_BEACON], skb, + &mvif->sta.wcid, NULL); spin_lock_bh(&dev->ps_lock); mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, - dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) | + dev->mphy.q_tx[MT_TXQ_CAB]->hw_idx) | FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); @@ -64,9 +66,10 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) data->count[mvif->idx]++; } -void mt7603_pre_tbtt_tasklet(unsigned long arg) +void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t) { - struct mt7603_dev *dev = (struct mt7603_dev *)arg; + struct mt7603_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); + struct mt76_dev *mdev = &dev->mt76; struct mt76_queue *q; struct beacon_bc_data data = {}; struct sk_buff *skb; @@ -78,7 +81,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) data.dev = dev; __skb_queue_head_init(&data.q); - q = dev->mt76.q_tx[MT_TXQ_BEACON]; + q = dev->mphy.q_tx[MT_TXQ_BEACON]; spin_lock_bh(&q->lock); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, @@ -89,13 +92,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) /* Flush all previous CAB queue packets */ mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); - mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_CAB], false); - mt76_csa_check(&dev->mt76); - if (dev->mt76.csa_complete) + mt76_csa_check(mdev); + if (mdev->csa_complete) goto out; - q = dev->mt76.q_tx[MT_TXQ_CAB]; + q = dev->mphy.q_tx[MT_TXQ_CAB]; do { nframes = skb_queue_len(&data.q); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), @@ -120,7 +123,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) struct ieee80211_vif *vif = info->control.vif; struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; - mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL); + mt76_tx_queue_skb(dev, q, skb, &mvif->sta.wcid, NULL); } mt76_queue_kick(dev, q); spin_unlock_bh(&q->lock); @@ -135,9 +138,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg) ((1 << (MT7603_MAX_INTERFACES - 1)) - 1))); out: - mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); - if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued > - hweight8(dev->mt76.beacon_mask)) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BEACON], false); + if (dev->mphy.q_tx[MT_TXQ_BEACON]->queued > hweight8(mdev->beacon_mask)) dev->beacon_check++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index d60d00f6f6a0..0086f18cb79a 100644 
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -4,27 +4,6 @@ #include "mac.h" #include "../dma.h" -static int -mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc) -{ - struct mt76_queue *hwq; - int err; - - hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); - if (!hwq) - return -ENOMEM; - - err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE); - if (err < 0) - return err; - - dev->mt76.q_tx[qid] = hwq; - - mt7603_irq_enable(dev, MT_INT_TX_DONE(idx)); - - return 0; -} - static void mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) { @@ -152,14 +131,16 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget) dev = container_of(napi, struct mt7603_dev, mt76.tx_napi); dev->tx_dma_check = 0; - for (i = MT_TXQ_MCU; i >= 0; i--) - mt76_queue_tx_cleanup(dev, i, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + for (i = MT_TXQ_PSD; i >= 0; i--) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); if (napi_complete_done(napi, 0)) mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL); - for (i = MT_TXQ_MCU; i >= 0; i--) - mt76_queue_tx_cleanup(dev, i, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + for (i = MT_TXQ_PSD; i >= 0; i--) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); mt7603_mac_sta_poll(dev); @@ -191,32 +172,42 @@ int mt7603_dma_init(struct mt7603_dev *dev) mt7603_pse_client_reset(dev); for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { - ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i], - MT7603_TX_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i], + MT7603_TX_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; } - ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD, - MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, + MT7603_PSD_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; - ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU, - MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU, + MT_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; - ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON, - MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BEACON, MT_TX_HW_QUEUE_BCN, + MT_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; - ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB, - MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_CAB, MT_TX_HW_QUEUE_BMC, + MT_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; + mt7603_irq_enable(dev, + MT_INT_TX_DONE(IEEE80211_AC_VO) | + MT_INT_TX_DONE(IEEE80211_AC_VI) | + MT_INT_TX_DONE(IEEE80211_AC_BE) | + MT_INT_TX_DONE(IEEE80211_AC_BK) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_BCN) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_BMC)); + ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c index 01f1e0da5ee1..d951cb81df83 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -141,6 +141,7 @@ static int mt7603_check_eeprom(struct mt76_dev *dev) switch (val) { case 0x7628: case 0x7603: + case 0x7600: return 0; default: return -EINVAL; @@ -170,8 +171,8 @@ int 
mt7603_eeprom_init(struct mt7603_dev *dev) } eeprom = (u8 *)dev->mt76.eeprom.data; - dev->mt76.cap.has_2ghz = true; - memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN); + dev->mphy.cap.has_2ghz = true; + memcpy(dev->mphy.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN); /* Check for 1SS devices */ dev->mphy.antenna_mask = 3; @@ -180,7 +181,7 @@ int mt7603_eeprom_init(struct mt7603_dev *dev) is_mt7688(dev)) dev->mphy.antenna_mask = 1; - mt76_eeprom_override(&dev->mt76); + mt76_eeprom_override(&dev->mphy); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c index c4848fafd270..b14e08046e20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -533,8 +533,7 @@ int mt7603_register_device(struct mt7603_dev *dev) spin_lock_init(&dev->ps_lock); INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work); - tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, - (unsigned long)dev); + tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet); dev->slottime = 9; dev->sensitivity_limit = 28; @@ -557,6 +556,7 @@ int mt7603_register_device(struct mt7603_dev *dev) ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); /* init led callbacks */ if (IS_ENABLED(CONFIG_MT76_LEDS)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c index f665a1c95eed..55095e66f2ef 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -445,7 +445,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev) sta = container_of((void *)msta, struct ieee80211_sta, drv_priv); for (i = 0; i < 4; i++) { - struct mt76_queue *q = dev->mt76.q_tx[i]; + struct mt76_queue *q = dev->mphy.q_tx[i]; u8 qidx = q->hw_idx; u8 tid = ac_to_tid[i]; u32 txtime = airtime[qidx]; @@ -896,7 +896,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; struct ieee80211_vif *vif = info->control.vif; - struct mt76_queue *q = dev->mt76.q_tx[qid]; + struct mt76_queue *q = dev->mphy.q_tx[qid]; struct mt7603_vif *mvif; int wlan_idx; int hdr_len = ieee80211_get_hdrlen_from_skb(skb); @@ -1434,8 +1434,9 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) mt7603_pse_client_reset(dev); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, i, true); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); @@ -1514,7 +1515,7 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev) int i; for (i = 0; i < 4; i++) { - q = dev->mt76.q_tx[i]; + q = dev->mphy.q_tx[i]; if (!q->queued) continue; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index c9226dceb510..6d47b57cbc39 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -380,9 +380,11 @@ mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list) { struct sk_buff *skb; - while ((skb = __skb_dequeue(list)) != NULL) - mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb), - skb, 0); + while ((skb = __skb_dequeue(list)) != NULL) { + int qid 
= skb_get_queue_mapping(skb); + + mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[qid], skb, 0); + } } void @@ -392,7 +394,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; struct sk_buff_head list; - mt76_stop_tx_queues(&dev->mt76, sta, true); + mt76_stop_tx_queues(&dev->mphy, sta, true); mt7603_wtbl_set_ps(dev, msta, ps); if (ps) return; @@ -512,7 +514,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, u16 cw_max = (1 << 10) - 1; u32 val; - queue = dev->mt76.q_tx[queue]->hw_idx; + queue = dev->mphy.q_tx[queue]->hw_idx; if (params->cw_min) cw_min = params->cw_min; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c index a47a3a644ecc..96b6c8916730 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -14,14 +14,38 @@ struct mt7603_fw_trailer { } __packed; static int -__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, - int cmd, int *wait_seq) +mt7603_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) { + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_mcu_rxd *rxd; + + if (!skb) { + dev_err(mdev->dev, + "MCU message %d (seq %d) timed out\n", + cmd, seq); + dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; + return -ETIMEDOUT; + } + + rxd = (struct mt7603_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -EAGAIN; + + return 0; +} + +static int +mt7603_mcu_skb_send_msg(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); int hdrlen = dev->mcu_running ? 
sizeof(struct mt7603_mcu_txd) : 12; - struct mt76_dev *mdev = &dev->mt76; struct mt7603_mcu_txd *txd; u8 seq; + mdev->mcu.timeout = 3 * HZ; + seq = ++mdev->mcu.msg_seq & 0xf; if (!seq) seq = ++mdev->mcu.msg_seq & 0xf; @@ -49,56 +73,7 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, if (wait_seq) *wait_seq = seq; - return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); -} - -static int -mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, - int len, bool wait_resp) -{ - struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); - unsigned long expires = jiffies + 3 * HZ; - struct mt7603_mcu_rxd *rxd; - struct sk_buff *skb; - int ret, seq; - - skb = mt76_mcu_msg_alloc(mdev, data, len); - if (!skb) - return -ENOMEM; - - mutex_lock(&mdev->mcu.mutex); - - ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq); - if (ret) - goto out; - - while (wait_resp) { - bool check_seq = false; - - skb = mt76_mcu_get_response(&dev->mt76, expires); - if (!skb) { - dev_err(mdev->dev, - "MCU message %d (seq %d) timed out\n", - cmd, seq); - dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; - ret = -ETIMEDOUT; - break; - } - - rxd = (struct mt7603_mcu_rxd *)skb->data; - if (seq == rxd->seq) - check_seq = true; - - dev_kfree_skb(skb); - - if (check_seq) - break; - } - -out: - mutex_unlock(&mdev->mcu.mutex); - - return ret; + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, 0); } static int @@ -114,29 +89,8 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len) .mode = cpu_to_le32(BIT(31)), }; - return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ, - &req, sizeof(req), true); -} - -static int -mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len) -{ - int cur_len, ret = 0; - - while (len > 0) { - cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd), - len); - - ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER, - data, cur_len, false); - if (ret) - break; - - data += cur_len; - len -= cur_len; - } - - return ret; + return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ, + &req, sizeof(req), true); } static int @@ -150,15 +104,14 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr) .addr = cpu_to_le32(addr), }; - return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, &req, + sizeof(req), true); } static int mt7603_mcu_restart(struct mt76_dev *dev) { - return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, - NULL, 0, true); + return mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ, NULL, 0, true); } static int mt7603_load_firmware(struct mt7603_dev *dev) @@ -226,7 +179,8 @@ static int mt7603_load_firmware(struct mt7603_dev *dev) goto out; } - ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len); + ret = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER, + fw->data, dl_len); if (ret) { dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); goto out; @@ -266,7 +220,8 @@ int mt7603_mcu_init(struct mt7603_dev *dev) { static const struct mt76_mcu_ops mt7603_mcu_ops = { .headroom = sizeof(struct mt7603_mcu_txd), - .mcu_send_msg = mt7603_mcu_msg_send, + .mcu_skb_send_msg = mt7603_mcu_skb_send_msg, + .mcu_parse_response = mt7603_mcu_parse_response, .mcu_restart = mt7603_mcu_restart, }; @@ -377,8 +332,8 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev) data[i].val = eep[req_fields[i]]; } - ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, - req, len, 
true); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, + req, len, true); kfree(req); return ret; @@ -424,8 +379,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev) memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7, sizeof(req.temp_comp_power)); - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL, + &req, sizeof(req), true); } int mt7603_mcu_set_channel(struct mt7603_dev *dev) @@ -470,8 +425,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev) for (i = 0; i < ARRAY_SIZE(req.txpower); i++) req.txpower[i] = tx_power; - ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH, - &req, sizeof(req), true); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH, &req, + sizeof(req), true); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 2a6e4332ad06..6e0a92a28b1c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -256,7 +256,7 @@ void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -void mt7603_pre_tbtt_tasklet(unsigned long arg); +void mt7603_pre_tbtt_tasklet(struct tasklet_struct *t); void mt7603_update_channel(struct mt76_dev *mdev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c index a5845da3547a..06fa28f645f2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -57,7 +57,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error: - ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(&dev->mt76); + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 00ba550fc48f..4d5e3f8b2a62 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -55,11 +55,26 @@ static int mt7615_pm_set(void *data, u64 val) { struct mt7615_dev *dev = data; + int ret = 0; if (!mt7615_wait_for_mcu_init(dev)) return 0; - return mt7615_pm_set_enable(dev, val); + if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) + return -EOPNOTSUPP; + + mt7615_mutex_acquire(dev); + + if (dev->phy.n_beacon_vif) { + ret = -EBUSY; + goto out; + } + + dev->pm.enable = val; +out: + mt7615_mutex_release(dev); + + return ret; } static int @@ -172,7 +187,7 @@ mt7615_reset_test_set(void *data, u64 val) skb_put(skb, 1); mt7615_mutex_acquire(dev); - mt76_tx_queue_skb_raw(dev, 0, skb, 0); + mt76_tx_queue_skb_raw(dev, dev->mphy.q_tx[0], skb, 0); mt7615_mutex_release(dev); return 0; @@ -317,21 +332,18 @@ static int mt7615_queues_read(struct seq_file *s, void *data) { struct mt7615_dev *dev = dev_get_drvdata(s->private); - static const struct { + struct { + struct mt76_queue *q; char *queue; - int id; } queue_map[] = { - { "PDMA0", MT_TXQ_BE }, - { "MCUQ", MT_TXQ_MCU }, - { "MCUFWQ", MT_TXQ_FWDL }, + { dev->mphy.q_tx[MT_TXQ_BE], "PDMA0" }, + { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUQ" }, + { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, }; int i; for (i = 0; i < ARRAY_SIZE(queue_map); i++) { - struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id]; - - if (!q) - continue; + struct 
mt76_queue *q = queue_map[i].q; seq_printf(s, "%s: queued=%d head=%d tail=%d\n", @@ -365,6 +377,107 @@ mt7615_rf_reg_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set, "0x%08llx\n"); +static ssize_t +mt7615_ext_mac_addr_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct mt7615_dev *dev = file->private_data; + char buf[32 * ((ETH_ALEN * 3) + 4) + 1]; + u8 addr[ETH_ALEN]; + int ofs = 0; + int i; + + for (i = 0; i < 32; i++) { + if (!(dev->muar_mask & BIT(i))) + continue; + + mt76_wr(dev, MT_WF_RMAC_MAR1, + FIELD_PREP(MT_WF_RMAC_MAR1_IDX, i * 2) | + MT_WF_RMAC_MAR1_START); + put_unaligned_le32(mt76_rr(dev, MT_WF_RMAC_MAR0), addr); + put_unaligned_le16((mt76_rr(dev, MT_WF_RMAC_MAR1) & + MT_WF_RMAC_MAR1_ADDR), addr + 4); + ofs += snprintf(buf + ofs, sizeof(buf) - ofs, "%d=%pM\n", i, addr); + } + + return simple_read_from_buffer(userbuf, count, ppos, buf, ofs); +} + +static ssize_t +mt7615_ext_mac_addr_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct mt7615_dev *dev = file->private_data; + unsigned long idx = 0; + u8 addr[ETH_ALEN]; + char buf[32]; + char *p; + + if (count > sizeof(buf)) + return -EINVAL; + + if (copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[sizeof(buf) - 1] = '\0'; + + p = strchr(buf, '='); + if (p) { + *p = 0; + p++; + + if (kstrtoul(buf, 0, &idx) || idx > 31) + return -EINVAL; + } else { + idx = 0; + p = buf; + } + + if (!mac_pton(p, addr)) + return -EINVAL; + + if (is_valid_ether_addr(addr)) { + dev->muar_mask |= BIT(idx); + } else { + memset(addr, 0, sizeof(addr)); + dev->muar_mask &= ~BIT(idx); + } + + mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, 1); + mt76_wr(dev, MT_WF_RMAC_MAR0, get_unaligned_le32(addr)); + mt76_wr(dev, MT_WF_RMAC_MAR1, + get_unaligned_le16(addr + 4) | + FIELD_PREP(MT_WF_RMAC_MAR1_IDX, idx * 2) | + MT_WF_RMAC_MAR1_START | + MT_WF_RMAC_MAR1_WRITE); + + mt76_rmw_field(dev, MT_WF_RMAC_MORE(0), MT_WF_RMAC_MORE_MUAR_MODE, !!dev->muar_mask); + + return count; +} + +static const struct file_operations fops_ext_mac_addr = { + .open = simple_open, + .llseek = generic_file_llseek, + .read = mt7615_ext_mac_addr_read, + .write = mt7615_ext_mac_addr_write, + .owner = THIS_MODULE, +}; + +static int +mt7663s_sched_quota_read(struct seq_file *s, void *data) +{ + struct mt7615_dev *dev = dev_get_drvdata(s->private); + struct mt76_sdio *sdio = &dev->mt76.sdio; + + seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota); + seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota); + seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota); + seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit); + + return 0; +} + int mt7615_init_debugfs(struct mt7615_dev *dev) { struct dentry *dir; @@ -406,11 +519,15 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) &fops_reset_test); debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, mt7615_read_temperature); + debugfs_create_file("ext_mac_addr", 0600, dir, dev, &fops_ext_mac_addr); debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf); debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg); debugfs_create_file_unsafe("rf_regval", 0600, dir, dev, &fops_rf_reg); + if (mt76_is_sdio(&dev->mt76)) + debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir, + mt7663s_sched_quota_read); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c 
index bf8ae14121db..25e3069cf2b1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,25 +12,6 @@
 #include "mac.h"
 static int
-mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
-{
- struct mt76_queue *hwq;
- int err;
-
- hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
- if (!hwq)
- return -ENOMEM;
-
- err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
- if (err < 0)
- return err;
-
- dev->mt76.q_tx[qid] = hwq;
-
- return 0;
-}
-
-static int
 mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
 {
 static const u8 wmm_queue_map[] = {
@@ -43,20 +24,21 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
 int i;
 for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
- MT7615_TX_RING_SIZE / 2);
+ ret = mt76_init_tx_queue(&dev->mphy, i, wmm_queue_map[i],
+ MT7615_TX_RING_SIZE / 2,
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
 }
- ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
- MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
+ ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT7622_TXQ_MGMT,
+ MT7615_TX_MGMT_RING_SIZE,
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
- ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
- MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
- return ret;
+ return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7622_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
 }
 static int
@@ -64,25 +46,24 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
 {
 int ret, i;
- ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
- MT7615_TXQ_FWDL,
- MT7615_TX_FWDL_RING_SIZE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
 if (ret)
 return ret;
 if (!is_mt7615(&dev->mt76))
 return mt7622_init_tx_queues_multi(dev);
- ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
+ ret = mt76_init_tx_queue(&dev->mphy, 0, 0, MT7615_TX_RING_SIZE,
+ MT_TX_RING_BASE);
 if (ret)
 return ret;
- for (i = 1; i < MT_TXQ_MCU; i++)
- dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
+ for (i = 1; i <= MT_TXQ_PSD ; i++)
+ dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
- ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
- MT7615_TX_MCU_RING_SIZE);
- return 0;
+ return mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
 }
 static int mt7615_poll_tx(struct napi_struct *napi, int budget)
@@ -91,7 +72,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
 dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
 if (napi_complete_done(napi, 0))
 mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
@@ -202,7 +183,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
 int ret;
 /* Increase buffer size to receive large VHT MPDUs */
- if (dev->mt76.cap.has_5ghz)
+ if (dev->mphy.cap.has_5ghz)
 rx_buf_size *= 2;
 mt76_dma_attach(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index f4756bb946c3..3232ebd5eda6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -99,20 +99,20 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
 if (is_mt7663(&dev->mt76)) {
 /* dual band */
- dev->mt76.cap.has_2ghz = true;
- dev->mt76.cap.has_5ghz = true;
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
 return;
 }
 if (is_mt7622(&dev->mt76)) {
 /* 2GHz only */
- dev->mt76.cap.has_2ghz = true;
+ dev->mphy.cap.has_2ghz = true;
 return;
 }
 if (is_mt7611(&dev->mt76)) {
 /* 5GHz only */
- dev->mt76.cap.has_5ghz = true;
+ dev->mphy.cap.has_5ghz = true;
 return;
 }
@@ -120,17 +120,17 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
 eeprom[MT_EE_WIFI_CONF]);
 switch (val) {
 case MT_EE_5GHZ:
- dev->mt76.cap.has_5ghz = true;
+ dev->mphy.cap.has_5ghz = true;
 break;
 case MT_EE_2GHZ:
- dev->mt76.cap.has_2ghz = true;
+ dev->mphy.cap.has_2ghz = true;
 break;
 case MT_EE_DBDC:
 dev->dbdc_support = true;
- /* fall through */
+ fallthrough;
 default:
- dev->mt76.cap.has_2ghz = true;
- dev->mt76.cap.has_5ghz = true;
+ dev->mphy.cap.has_2ghz = true;
+ dev->mphy.cap.has_5ghz = true;
 break;
 }
 }
@@ -342,10 +342,10 @@ int mt7615_eeprom_init(struct mt7615_dev *dev, u32 addr)
 }
 mt7615_eeprom_parse_hw_cap(dev);
- memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
 ETH_ALEN);
- mt76_eeprom_override(&dev->mt76);
+ mt76_eeprom_override(&dev->mphy);
 return 0;
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index e194259c84e9..a73b76e57c7f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -221,7 +221,7 @@ static const struct ieee80211_iface_combination if_comb_radar[] = {
 {
 .limits = if_limits,
 .n_limits = ARRAY_SIZE(if_limits),
- .max_interfaces = 4,
+ .max_interfaces = MT7615_MAX_INTERFACES,
 .num_different_channels = 1,
 .beacon_int_infra_match = true,
 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
@@ -237,7 +237,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
 {
 .limits = if_limits,
 .n_limits = ARRAY_SIZE(if_limits),
- .max_interfaces = 4,
+ .max_interfaces = MT7615_MAX_INTERFACES,
 .num_different_channels = 1,
 .beacon_int_infra_match = true,
 }
@@ -385,7 +385,7 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
 {
 struct mt7615_phy *phy = mt7615_ext_phy(dev);
 struct mt76_phy *mphy;
- int ret;
+ int i, ret;
 if (!is_mt7615(&dev->mt76))
 return -EOPNOTSUPP;
@@ -422,14 +422,21 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
 * Make the secondary PHY MAC address local without overlapping with
 * the usual MAC address allocation scheme on multiple virtual interfaces
 */
- mphy->hw->wiphy->perm_addr[0] |= 2;
- mphy->hw->wiphy->perm_addr[0] ^= BIT(7);
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ mt76_eeprom_override(mphy);
 /* second phy can only handle 5 GHz */
- mphy->sband_2g.sband.n_channels = 0;
- mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ mphy->cap.has_5ghz = true;
- ret = mt76_register_phy(mphy);
+ /* mt7615 second phy shares the same hw queues with the primary one */
+ for (i = 0; i <= MT_TXQ_PSD ; i++)
+ mphy->q_tx[i] = dev->mphy.q_tx[i];
+
+ ret = mt76_register_phy(mphy, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
 if (ret)
 ieee80211_free_hw(mphy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 8dc645e398fd..0f360be0b885 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -215,8 +215,8 @@ static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
 dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
 dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
 dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
- dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
- dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
+ dev->test.last_ib_rssi[0] = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
+ dev->test.last_wb_rssi[0] = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
 #endif
 }
@@ -915,22 +915,20 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
 struct ieee80211_tx_rate *rates)
 {
 struct mt7615_dev *dev = phy->dev;
- struct mt7615_wtbl_desc *wd;
+ struct mt7615_wtbl_rate_desc *wrd;
- if (work_pending(&dev->wtbl_work))
+ if (work_pending(&dev->rate_work))
 return -EBUSY;
- wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
- if (!wd)
+ wrd = kzalloc(sizeof(*wrd), GFP_ATOMIC);
+ if (!wrd)
 return -ENOMEM;
- wd->type = MT7615_WTBL_RATE_DESC;
- wd->sta = sta;
-
+ wrd->sta = sta;
 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
- &wd->rate);
- list_add_tail(&wd->node, &dev->wd_head);
- queue_work(dev->mt76.wq, &dev->wtbl_work);
+ &wrd->rate);
+ list_add_tail(&wrd->node, &dev->wrd_head);
+ queue_work(dev->mt76.wq, &dev->rate_work);
 return 0;
 }
@@ -1030,31 +1028,33 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
 }
 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
-int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- u8 *key, u8 keylen,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd)
+static int
+mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
 {
 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx) + 30 * 4;
 u8 data[32] = {};
- if (keylen > sizeof(data))
+ if (key->keylen > sizeof(data))
 return -EINVAL;
 mt76_rr_copy(dev, addr, data, sizeof(data));
 if (cmd == SET_KEY) {
 if (cipher == MT_CIPHER_TKIP) {
 /* Rx/Tx MIC keys are swapped */
- memcpy(data + 16, key + 24, 8);
- memcpy(data + 24, key + 16, 8);
+ memcpy(data, key->key, 16);
+ memcpy(data + 16, key->key + 24, 8);
+ memcpy(data + 24, key->key + 16, 8);
+ } else {
+ if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
+ memmove(data + 16, data, 16);
+ if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
+ memcpy(data, key->key, key->keylen);
+ else if (cipher == MT_CIPHER_BIP_CMAC_128)
+ memcpy(data + 16, key->key, 16);
 }
- if (cipher != MT_CIPHER_BIP_CMAC_128 && wcid->cipher)
- memmove(data + 16, data, 16);
- if (cipher != MT_CIPHER_BIP_CMAC_128 || !wcid->cipher)
- memcpy(data, key, keylen);
- else if (cipher == MT_CIPHER_BIP_CMAC_128)
- memcpy(data + 16, key, 16);
 } else {
 if (wcid->cipher & ~BIT(cipher)) {
 if (cipher != MT_CIPHER_BIP_CMAC_128)
@@ -1068,12 +1068,11 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
 return 0;
 }
-EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_key);
-int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
- int keyidx, enum set_key_cmd cmd)
+static int
+mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ int keyidx, enum set_key_cmd cmd)
 {
 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx), w0, w1;
@@ -1105,12 +1104,11 @@ int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
 return 0;
 }
-EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_pk);
-void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd)
+static void
+mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+ enum mt7615_cipher_type cipher,
+ enum set_key_cmd cmd)
 {
 u32 addr = mt7615_mac_wtbl_addr(dev, wcid->idx);
@@ -1128,12 +1126,11 @@ void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
 mt76_clear(dev, addr + 2 * 4, MT_WTBL_W2_KEY_TYPE);
 }
 }
-EXPORT_SYMBOL_GPL(mt7615_mac_wtbl_update_cipher);
-int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
 {
 enum mt7615_cipher_type cipher;
 int err;
@@ -1142,25 +1139,32 @@ int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
 if (cipher == MT_CIPHER_NONE)
 return -EOPNOTSUPP;
- spin_lock_bh(&dev->mt76.lock);
-
 mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, cmd);
- err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
- cipher, cmd);
+ err = mt7615_mac_wtbl_update_key(dev, wcid, key, cipher, cmd);
 if (err < 0)
- goto out;
+ return err;
- err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
- cmd);
+ err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, cmd);
 if (err < 0)
- goto out;
+ return err;
 if (cmd == SET_KEY)
 wcid->cipher |= BIT(cipher);
 else
 wcid->cipher &= ~BIT(cipher);
-out:
+ return 0;
+}
+
+int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ int err;
+
+ spin_lock_bh(&dev->mt76.lock);
+ err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
 spin_unlock_bh(&dev->mt76.lock);
 return err;
@@ -1431,12 +1435,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
 struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
 u8 i, count;
- mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
 if (is_mt7615(&dev->mt76)) {
- mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
 } else {
 for (i = 0; i < IEEE80211_NUM_ACS; i++)
- mt76_queue_tx_cleanup(dev, i, false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);
 }
 count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
@@ -1969,49 +1973,6 @@ out:
 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
 }
-static void
-mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7615_phy *phy = priv;
- struct mt7615_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
-
- if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
- return;
-
- if (dev->pm.enable) {
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- mt76_set(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- } else {
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- mt76_clear(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- }
-}
-
-int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
-{
- struct mt76_phy *mphy = dev->phy.mt76;
-
- if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
- return -EOPNOTSUPP;
-
- mt7615_mutex_acquire(dev);
-
- if (dev->pm.enable == enable)
- goto out;
-
- dev->pm.enable = enable;
- ieee80211_iterate_active_interfaces(mphy->hw,
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7615_pm_interface_iter, mphy->priv);
-out:
- mt7615_mutex_release(dev);
-
- return 0;
-}
-
 void mt7615_mac_work(struct work_struct *work)
 {
 struct mt7615_phy *phy;
@@ -2083,8 +2044,9 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
 MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
 usleep_range(1000, 2000);
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
 for (i = 0; i < __MT_TXQ_MAX; i++)
- mt76_queue_tx_cleanup(dev, i, true);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
 mt76_for_each_q_rx(&dev->mt76, i) {
 mt76_queue_rx_reset(dev, i);
@@ -2314,3 +2276,46 @@ stop:
 mt7615_dfs_stop_radar_detector(phy);
 return 0;
 }
+
+int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct mt7615_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int err;
+
+ if (!mt7615_firmware_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ return 0;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ if (enable)
+ phy->n_beacon_vif++;
+ else
+ phy->n_beacon_vif--;
+ fallthrough;
+ default:
+ break;
+ }
+
+ err = mt7615_mcu_set_bss_pm(dev, vif, !phy->n_beacon_vif);
+ if (err)
+ return err;
+
+ if (phy->n_beacon_vif) {
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ mt76_clear(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ } else {
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mt76_set(dev, MT_WF_RFCR(ext_phy),
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 3186b7b2ca48..56dd0b4e4460 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -115,29 +115,50 @@ static void mt7615_stop(struct ieee80211_hw *hw)
 mt7615_mutex_release(dev);
 }
-static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+static inline int get_free_idx(u32 mask, u8 start, u8 end)
+{
+ return ffs(~mask & GENMASK(end, start));
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u64 mask)
 {
 int i;
 switch (type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_AP:
 case NL80211_IFTYPE_MESH_POINT:
 case NL80211_IFTYPE_ADHOC:
- /* ap use hw bssid 0 and ext bssid */
+ case NL80211_IFTYPE_STATION:
+ /* prefer hw bssid slot 1-3 */
+ i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
+ if (i)
+ return i - 1;
+
+ if (type != NL80211_IFTYPE_STATION)
+ break;
+
+ /* next, try to find a free repeater entry for the sta */
+ i = get_free_idx(mask >> REPEATER_BSSID_START, 0,
+ REPEATER_BSSID_MAX - REPEATER_BSSID_START);
+ if (i)
+ return i + 32 - 1;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
 if (~mask & BIT(HW_BSSID_0))
 return HW_BSSID_0;
- for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
- if (~mask & BIT(i))
- return i;
 break;
- case NL80211_IFTYPE_STATION:
- /* sta use hw bssid other than 0 */
- for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
- if (~mask & BIT(i))
- return i;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ /* ap uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
 break;
 default:
@@ -187,8 +208,8 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
 mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
 dev->mphy.vif_mask |= BIT(mvif->idx);
- dev->omac_mask |= BIT(mvif->omac_idx);
- phy->omac_mask |= BIT(mvif->omac_idx);
+ dev->omac_mask |= BIT_ULL(mvif->omac_idx);
+ phy->omac_mask |= BIT_ULL(mvif->omac_idx);
 mt7615_mcu_set_dbdc(dev);
@@ -211,15 +232,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
 if (ret)
 goto out;
- if (dev->pm.enable) {
- ret = mt7615_mcu_set_bss_pm(dev, vif, true);
- if (ret)
- goto out;
-
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- mt76_set(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- }
+ mt7615_mac_set_beacon_filter(phy, vif, true);
 out:
 mt7615_mutex_release(dev);
@@ -245,20 +258,14 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
 mt7615_free_pending_tx_skbs(dev, msta);
- if (dev->pm.enable) {
- bool ext_phy = phy != &dev->phy;
-
- mt7615_mcu_set_bss_pm(dev, vif, false);
- mt76_clear(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- }
+ mt7615_mac_set_beacon_filter(phy, vif, false);
 mt7615_mcu_add_dev_info(dev, vif, false);
 rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
 dev->mphy.vif_mask &= ~BIT(mvif->idx);
- dev->omac_mask &= ~BIT(mvif->omac_idx);
- phy->omac_mask &= ~BIT(mvif->omac_idx);
+ dev->omac_mask &= ~BIT_ULL(mvif->omac_idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->omac_idx);
 mt7615_mutex_release(dev);
@@ -334,39 +341,6 @@ out:
 return ret;
 }
-static int
-mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
- struct mt7615_sta *msta,
- struct ieee80211_key_conf *key)
-{
- struct mt7615_wtbl_desc *wd;
-
- wd = kzalloc(sizeof(*wd), GFP_KERNEL);
- if (!wd)
- return -ENOMEM;
-
- wd->type = MT7615_WTBL_KEY_DESC;
- wd->sta = msta;
-
- wd->key.key = kmemdup(key->key, key->keylen, GFP_KERNEL);
- if (!wd->key.key) {
- kfree(wd);
- return -ENOMEM;
- }
- wd->key.cipher = key->cipher;
- wd->key.keyidx = key->keyidx;
- wd->key.keylen = key->keylen;
- wd->key.cmd = cmd;
-
- spin_lock_bh(&dev->mt76.lock);
- list_add_tail(&wd->node, &dev->wd_head);
- spin_unlock_bh(&dev->mt76.lock);
-
- queue_work(dev->mt76.wq, &dev->wtbl_work);
-
- return 0;
-}
-
 static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
 struct ieee80211_key_conf *key)
@@ -393,8 +367,6 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 case WLAN_CIPHER_SUITE_AES_CMAC:
 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
 break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
 case WLAN_CIPHER_SUITE_TKIP:
 case WLAN_CIPHER_SUITE_CCMP:
 case WLAN_CIPHER_SUITE_CCMP_256:
@@ -402,6 +374,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 case WLAN_CIPHER_SUITE_GCMP_256:
 case WLAN_CIPHER_SUITE_SMS4:
 break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
 default:
 return -EOPNOTSUPP;
 }
@@ -420,7 +394,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 if (mt76_is_mmio(&dev->mt76))
 err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
 else
- err = mt7615_queue_key_update(dev, cmd, msta, key);
+ err = __mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
 mt7615_mutex_release(dev);
@@ -511,7 +485,6 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
 } while (0)
 phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
 MT_WF_RFCR_DROP_FRAME_REPORT |
 MT_WF_RFCR_DROP_PROBEREQ |
 MT_WF_RFCR_DROP_MCAST_FILTERED |
@@ -522,6 +495,9 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
 MT_WF_RFCR_DROP_UNWANTED_CTL |
 MT_WF_RFCR_DROP_STBC_MULTI);
+ if (phy->n_beacon_vif || !mt7615_firmware_offload(dev))
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_BEACON;
+
 MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
 MT_WF_RFCR_DROP_A3_MAC |
 MT_WF_RFCR_DROP_A3_BSSID);
@@ -1127,7 +1103,6 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
 {
 struct mt7615_dev *dev = mt7615_hw_dev(hw);
 struct mt7615_phy *phy = mt7615_hw_phy(hw);
- bool ext_phy = phy != &dev->phy;
 int err = 0;
 cancel_delayed_work_sync(&dev->pm.ps_work);
@@ -1139,8 +1114,6 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
 cancel_delayed_work_sync(&phy->scan_work);
 cancel_delayed_work_sync(&phy->mac_work);
- mt76_set(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
-
 set_bit(MT76_STATE_SUSPEND, &phy->mt76->state);
 ieee80211_iterate_active_interfaces(hw,
 IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -1158,7 +1131,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
 {
 struct mt7615_dev *dev = mt7615_hw_dev(hw);
 struct mt7615_phy *phy = mt7615_hw_phy(hw);
- bool running, ext_phy = phy != &dev->phy;
+ bool running;
 mt7615_mutex_acquire(dev);
@@ -1182,7 +1155,6 @@ static int mt7615_resume(struct ieee80211_hw *hw)
 ieee80211_queue_delayed_work(hw, &phy->mac_work,
 MT7615_WATCHDOG_TIME);
- mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
 mt7615_mutex_release(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 31b40fb83f6c..a44b7766dec6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -94,6 +94,9 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
 __le32 *txd;
 u32 val;
+ /* TODO: make dynamic based on msg type */
+ dev->mt76.mcu.timeout = 20 * HZ;
+
 seq = ++dev->mt76.mcu.msg_seq & 0xf;
 if (!seq)
 seq = ++dev->mt76.mcu.msg_seq & 0xf;
@@ -165,32 +168,22 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_fill_msg);
-static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
- int cmd, int *wait_seq)
+int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
 {
- enum mt76_txq_id qid;
-
- mt7615_mcu_fill_msg(dev, skb, cmd, wait_seq);
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
- qid = MT_TXQ_MCU;
- else
- qid = MT_TXQ_FWDL;
-
- return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
-}
-
-static int
-mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
- struct sk_buff *skb, int seq)
-{
- struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
+ struct mt7615_mcu_rxd *rxd;
 int ret = 0;
- if (seq != rxd->seq) {
- ret = -EAGAIN;
- goto out;
+ if (!skb) {
+ dev_err(mdev->dev, "Message %ld (seq %d) timeout\n",
+ cmd & MCU_CMD_MASK, seq);
+ return -ETIMEDOUT;
 }
+ rxd = (struct mt7615_mcu_rxd *)skb->data;
+ if (seq != rxd->seq)
+ return -EAGAIN;
+
 switch (cmd) {
 case MCU_CMD_PATCH_SEM_CONTROL:
 skb_pull(skb, sizeof(*rxd) - 4);
@@ -228,69 +221,26 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
 default:
 break;
 }
-out:
- dev_kfree_skb(skb);
-
- return ret;
-}
-
-int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq)
-{
- unsigned long expires = jiffies + 20 * HZ;
- struct sk_buff *skb;
- int ret = 0;
-
- while (true) {
- skb = mt76_mcu_get_response(&dev->mt76, expires);
- if (!skb) {
- dev_err(dev->mt76.dev, "Message %ld (seq %d) timeout\n",
- cmd & MCU_CMD_MASK, seq);
- return -ETIMEDOUT;
- }
-
- ret = mt7615_mcu_parse_response(dev, cmd, skb, seq);
- if (ret != -EAGAIN)
- break;
- }
 return ret;
 }
-EXPORT_SYMBOL_GPL(mt7615_mcu_wait_response);
+EXPORT_SYMBOL_GPL(mt7615_mcu_parse_response);
 static int
 mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, bool wait_resp)
+ int cmd, int *seq)
 {
 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- int ret, seq;
-
- mutex_lock(&mdev->mcu.mutex);
-
- ret = __mt7615_mcu_msg_send(dev, skb, cmd, &seq);
- if (ret)
- goto out;
-
- if (wait_resp)
- ret = mt7615_mcu_wait_response(dev, cmd, seq);
-
-out:
- mutex_unlock(&mdev->mcu.mutex);
-
- return ret;
-}
-
-int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
- int len, bool wait_resp)
-{
- struct sk_buff *skb;
+ enum mt76_txq_id qid;
- skb = mt76_mcu_msg_alloc(mdev, data, len);
- if (!skb)
- return -ENOMEM;
+ mt7615_mcu_fill_msg(dev, skb, cmd, seq);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ qid = MT_MCUQ_WM;
+ else
+ qid = MT_MCUQ_FWDL;
- return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp);
+ return mt76_tx_queue_skb_raw(dev, dev->mt76.q_mcu[qid], skb, 0);
 }
-EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send);
 u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
 {
@@ -303,9 +253,9 @@ u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
 .address = cpu_to_le32(reg),
 };
- return __mt76_mcu_send_msg(&dev->mt76,
- MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
+ &req, sizeof(req), true);
 }
 int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
@@ -320,8 +270,8 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
 .data = cpu_to_le32(val),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
+ sizeof(req), false);
 }
 static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
@@ -645,8 +595,44 @@ static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
 .mode = cpu_to_le32(mode),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
+}
+
+static int
+mt7615_mcu_muar_config(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ bool bssid, bool enable)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ u32 idx = mvif->omac_idx - REPEATER_BSSID_START;
+ u32 mask = dev->omac_mask >> 32 & ~BIT(idx);
+ const u8 *addr = vif->addr;
+ struct {
+ u8 mode;
+ u8 force_clear;
+ u8 clear_bitmap[8];
+ u8 entry_count;
+ u8 write;
+
+ u8 index;
+ u8 bssid;
+ u8 addr[ETH_ALEN];
+ } __packed req = {
+ .mode = !!mask || enable,
+ .entry_count = 1,
+ .write = 1,
+
+ .index = idx * 2 + bssid,
+ };
+
+ if (bssid)
+ addr = vif->bss_conf.bssid;
+
+ if (enable)
+ ether_addr_copy(req.addr, addr);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MUAR_UPDATE, &req,
+ sizeof(req), true);
 }
 static int
@@ -684,9 +670,12 @@ mt7615_mcu_add_dev(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 },
 };
+ if (mvif->omac_idx >= REPEATER_BSSID_START)
+ return mt7615_mcu_muar_config(dev, vif, false, enable);
+
 memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
- &data, sizeof(data), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ &data, sizeof(data), true);
 }
 static int
@@ -750,8 +739,8 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
 }
 dev_kfree_skb(skb);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_BCN_OFFLOAD, &req,
+ sizeof(req), true);
 }
 static int
@@ -781,8 +770,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
 .band_idx = band,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, &req,
+ sizeof(req), true);
 }
 static struct sk_buff *
@@ -1266,6 +1255,9 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 struct mt7615_dev *dev = phy->dev;
 struct sk_buff *skb;
+ if (mvif->omac_idx >= REPEATER_BSSID_START)
+ mt7615_mcu_muar_config(dev, vif, true, enable);
+
 skb = mt7615_mcu_alloc_sta_req(dev, mvif, NULL);
 if (IS_ERR(skb))
 return PTR_ERR(skb);
@@ -1275,11 +1267,12 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable);
- if (enable && mvif->omac_idx > EXT_BSSID_START)
+ if (enable && mvif->omac_idx >= EXT_BSSID_START &&
+ mvif->omac_idx < REPEATER_BSSID_START)
 mt7615_mcu_bss_ext_tlv(skb, mvif);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_BSS_INFO_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_BSS_INFO_UPDATE, true);
 }
 static int
@@ -1299,8 +1292,8 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
 mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, NULL, wtbl_hdr);
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_WTBL_UPDATE, true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ true);
 if (err < 0)
 return err;
@@ -1310,8 +1303,8 @@ mt7615_mcu_wtbl_tx_ba(struct mt7615_dev *dev,
 mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
 }
 static int
@@ -1331,8 +1324,8 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
 mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
 if (err < 0 || !enable)
 return err;
@@ -1343,8 +1336,8 @@ mt7615_mcu_wtbl_rx_ba(struct mt7615_dev *dev,
 mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, NULL, wtbl_hdr);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_WTBL_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ true);
 }
 static int
@@ -1383,7 +1376,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 cmd = enable ? MCU_EXT_CMD_WTBL_UPDATE : MCU_EXT_CMD_STA_REC_UPDATE;
 skb = enable ? wskb : sskb;
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
 if (err < 0) {
 skb = enable ? sskb : wskb;
 dev_kfree_skb(skb);
@@ -1394,7 +1387,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
 skb = enable ? sskb : wskb;
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
 }
 static const struct mt7615_mcu_ops wtbl_update_ops = {
@@ -1432,8 +1425,8 @@ mt7615_mcu_sta_ba(struct mt7615_dev *dev,
 &skb);
 mt7615_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_STA_REC_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_STA_REC_UPDATE, true);
 }
 static int
@@ -1484,7 +1477,7 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 mt7615_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
 }
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
 }
 static int
@@ -1586,7 +1579,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
 data = enable ? (void *)&dev_req : (void *)&basic_req;
 len = enable ? sizeof(dev_req) : sizeof(basic_req);
- err = __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
+ err = mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
 if (err < 0)
 return err;
@@ -1594,7 +1587,7 @@ mt7615_mcu_uni_add_dev(struct mt7615_dev *dev,
 data = enable ? (void *)&basic_req : (void *)&dev_req;
 len = enable ? sizeof(basic_req) : sizeof(dev_req);
- return __mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
+ return mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true);
 }
 static int
@@ -1708,8 +1701,8 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx);
 basic_req.basic.conn_state = !enable;
- err = __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &basic_req, sizeof(basic_req), true);
+ err = mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &basic_req, sizeof(basic_req), true);
 if (err < 0)
 return err;
@@ -1744,8 +1737,8 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
 rlm_req.rlm.sco = 3; /* SCB */
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &rlm_req, sizeof(rlm_req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &rlm_req, sizeof(rlm_req), true);
 }
 static int
@@ -1816,8 +1809,8 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
 }
 dev_kfree_skb(skb);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_BSS_INFO_UPDATE,
+ &req, sizeof(req), true);
 }
 static int
@@ -1846,8 +1839,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
 mt7615_mcu_wtbl_ba_tlv(skb, params, enable, true, sta_wtbl, wtbl_hdr);
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
 if (err < 0)
 return err;
@@ -1857,8 +1850,8 @@ mt7615_mcu_uni_tx_ba(struct mt7615_dev *dev,
 mt7615_mcu_sta_ba_tlv(skb, params, enable, true);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 static int
@@ -1879,8 +1872,8 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
 mt7615_mcu_sta_ba_tlv(skb, params, enable, false);
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
 if (err < 0 || !enable)
 return err;
@@ -1898,8 +1891,8 @@ mt7615_mcu_uni_rx_ba(struct mt7615_dev *dev,
 mt7615_mcu_wtbl_ba_tlv(skb, params, enable, false, sta_wtbl, wtbl_hdr);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_STA_REC_UPDATE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD_STA_REC_UPDATE, true);
 }
 static int
@@ -1922,29 +1915,6 @@ static const struct mt7615_mcu_ops uni_update_ops = {
 .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
 };
-static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
- int len)
-{
- int ret = 0, cur_len;
-
- while (len > 0) {
- cur_len = min_t(int, 4096 - dev->mt76.mcu_ops->headroom, len);
-
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_SCATTER,
- data, cur_len, false);
- if (ret)
- break;
-
- data += cur_len;
- len -= cur_len;
-
- if (mt76_is_mmio(&dev->mt76))
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
- }
-
- return ret;
-}
-
 static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
 u32 option)
 {
@@ -1956,14 +1926,13 @@ static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
 .addr = cpu_to_le32(addr),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FW_START_REQ, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_restart(struct mt76_dev *dev)
 {
- return __mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL,
- 0, true);
+ return mt76_mcu_send_msg(dev, MCU_CMD_RESTART_DL_REQ, NULL, 0, true);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_restart);
@@ -1975,8 +1944,8 @@ static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
 .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_SEM_CONTROL, &req,
+ sizeof(req), true);
 }
 static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
@@ -1988,8 +1957,8 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
 .check_crc = 0,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_PATCH_FINISH_REQ, &req,
+ sizeof(req), true);
 }
 static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
@@ -2032,7 +2001,8 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
 goto out;
 }
- ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len);
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ fw->data + sizeof(*hdr), len);
 if (ret) {
 dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
 goto out;
 }
@@ -2092,7 +2062,8 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
 return err;
 }
- err = mt7615_mcu_send_firmware(dev, data + offset, len);
+ err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ data + offset, len);
 if (err) {
 dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
 return err;
 }
@@ -2285,8 +2256,8 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
 .ctrl_val = ctrl
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST,
- &data, sizeof(data), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, &data,
+ sizeof(data), true);
 }
 static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
@@ -2333,7 +2304,8 @@ static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
 goto out;
 }
- ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ fw->data + offset, len);
 if (ret) {
 dev_err(dev->mt76.dev, "Failed to send firmware\n");
 goto out;
 }
@@ -2467,7 +2439,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
 static const struct mt76_mcu_ops mt7615_mcu_ops = {
 .headroom = sizeof(struct mt7615_mcu_txd),
 .mcu_skb_send_msg = mt7615_mcu_send_message,
- .mcu_send_msg = mt7615_mcu_msg_send,
+ .mcu_parse_response = mt7615_mcu_parse_response,
 .mcu_restart = mt7615_mcu_restart,
 };
 int ret;
@@ -2492,7 +2464,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
 if (ret)
 return ret;
- mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false);
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
 dev_dbg(dev->mt76.dev, "Firmware init done\n");
 set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
 mt7615_mcu_fw_log_2_host(dev, 0);
@@ -2547,8 +2519,8 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
 skb_put_data(skb, &req_hdr, sizeof(req_hdr));
 skb_put_data(skb, eep + offset, eep_len);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE, true);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_set_eeprom);
@@ -2563,8 +2535,8 @@ int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable)
 .band = band,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
@@ -2583,8 +2555,8 @@ int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val)
 .pkt_thresh = cpu_to_le32(0x2),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
@@ -2620,8 +2592,8 @@ int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
 if (params->cw_max)
 req.cw_max = cpu_to_le16(fls(params->cw_max));
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
@@ -2654,13 +2626,13 @@ int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
 } while (0)
 for (i = 0; i < 4; i++) {
- bool band = !!(ext_phy->omac_mask & BIT(i));
+ bool band = !!(ext_phy->omac_mask & BIT_ULL(i));
 ADD_DBDC_ENTRY(DBDC_TYPE_BSS, i, band);
 }
 for (i = 0; i < 14; i++) {
- bool band = !!(ext_phy->omac_mask & BIT(0x11 + i));
+ bool band = !!(ext_phy->omac_mask & BIT_ULL(0x11 + i));
 ADD_DBDC_ENTRY(DBDC_TYPE_MBSS, i, band);
 }
@@ -2679,8 +2651,8 @@ int mt7615_mcu_set_dbdc(struct mt7615_dev *dev)
 ADD_DBDC_ENTRY(DBDC_TYPE_MGMT, 1, 1);
 out:
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DBDC_CTRL, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
@@ -2689,8 +2661,8 @@ int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
 .operation = WTBL_RESET_ALL,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_WTBL_UPDATE, &req,
+ sizeof(req), true);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_del_wtbl_all);
@@ -2711,56 +2683,98 @@ int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
 .val = val,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
 {
 struct {
- u16 tag;
- u16 min_lpn;
+ __le16 tag;
+ __le16 min_lpn;
 } req = {
- .tag = 0x1,
- .min_lpn = val,
+ .tag = cpu_to_le16(0x1),
+ .min_lpn = cpu_to_le16(val),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
 const struct mt7615_dfs_pulse *pulse)
 {
 struct {
- u16 tag;
- struct mt7615_dfs_pulse pulse;
+ __le16 tag;
+ __le32 max_width; /* us */
+ __le32 max_pwr; /* dbm */
+ __le32 min_pwr; /* dbm */
+ __le32 min_stgr_pri; /* us */
+ __le32 max_stgr_pri; /* us */
+ __le32 min_cr_pri; /* us */
+ __le32 max_cr_pri; /* us */
 } req = {
- .tag = 0x3,
+ .tag = cpu_to_le16(0x3),
+#define __req_field(field) .field = cpu_to_le32(pulse->field)
+ __req_field(max_width),
+ __req_field(max_pwr),
+ __req_field(min_pwr),
+ __req_field(min_stgr_pri),
+ __req_field(max_stgr_pri),
+ __req_field(min_cr_pri),
+ __req_field(max_cr_pri),
+#undef __req_field
 };
- memcpy(&req.pulse, pulse, sizeof(*pulse));
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
 const struct mt7615_dfs_pattern *pattern)
 {
 struct {
- u16 tag;
- u16 radar_type;
- struct mt7615_dfs_pattern pattern;
+ __le16 tag;
+ __le16 radar_type;
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u8 max_pw;
+ __le32 min_pri;
+ __le32 max_pri;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
 } req = {
- .tag = 0x2,
- .radar_type = index,
+ .tag = cpu_to_le16(0x2),
+ .radar_type = cpu_to_le16(index),
+#define __req_field_u8(field) .field = pattern->field
+#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
+ __req_field_u8(enb),
+ __req_field_u8(stgr),
+ __req_field_u8(min_crpn),
+ __req_field_u8(max_crpn),
+ __req_field_u8(min_crpr),
+ __req_field_u8(min_pw),
+ __req_field_u8(max_pw),
+ __req_field_u32(min_pri),
+ __req_field_u32(max_pri),
+ __req_field_u8(min_crbn),
+ __req_field_u8(max_crbn),
+ __req_field_u8(min_stgpn),
+ __req_field_u8(max_stgpn),
+ __req_field_u8(min_stgpr),
+#undef __req_field_u8
+#undef __req_field_u32
 };
- memcpy(&req.pattern, pattern, sizeof(*pattern));
-
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
@@ -2769,9 +2783,9 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
 u8 pulse_num;
 u8 rsv[3];
 struct {
- u32 start_time;
- u16 width;
- s16 power;
+ __le32 start_time;
+ __le16 width;
+ __le16 power;
 } pattern[32];
 } req = {
 .pulse_num = dev->radar_pattern.n_pulses,
@@ -2784,14 +2798,15 @@ int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev)
 /* TODO: add some noise here */
 for (i = 0; i < dev->radar_pattern.n_pulses; i++) {
- req.pattern[i].width = dev->radar_pattern.width;
- req.pattern[i].power = dev->radar_pattern.power;
- req.pattern[i].start_time = start_time +
- i * dev->radar_pattern.period;
+ u32 ts = start_time + i * dev->radar_pattern.period;
+
+ req.pattern[i].width = cpu_to_le16(dev->radar_pattern.width);
+ req.pattern[i].power = cpu_to_le16(dev->radar_pattern.power);
+ req.pattern[i].start_time = cpu_to_le32(ts);
 }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN,
- &req, sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_PATTERN,
+ &req, sizeof(req), false);
 }
 static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
@@ -2885,7 +2900,7 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
 else
 mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
- return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
 }
 int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
@@ -2897,8 +2912,8 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
 .action = index,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
@@ -2918,8 +2933,8 @@ int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
 .value = cpu_to_le32(val),
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+ sizeof(req), false);
 }
 int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
@@ -2936,8 +2951,9 @@ int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
 .sku_enable = enable,
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
- sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req,
+ sizeof(req), true);
 }
 int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif)
@@ -2957,8 +2973,8 @@ int mt7615_mcu_set_vif_ps(struct mt7615_dev *dev, struct ieee80211_vif *vif)
 if (vif->type != NL80211_IFTYPE_STATION)
 return -ENOTSUPP;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE,
- &req, sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_PS_PROFILE, &req,
+ sizeof(req), false);
 }
 int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy)
@@ -3018,8 +3034,8 @@ int mt7615_mcu_set_channel_domain(struct mt7615_phy *phy)
 skb_put_data(skb, &channel, sizeof(channel));
 }
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_CMD_SET_CHAN_DOMAIN, false);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_SET_CHAN_DOMAIN,
+ false);
 }
 #define MT7615_SCAN_CHANNEL_TIME 60
@@ -3101,8 +3117,8 @@ int mt7615_mcu_hw_scan(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 req->scan_func = 1;
 }
- err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN,
- false);
+ err = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_START_HW_SCAN,
+ false);
 if (err < 0)
 clear_bit(MT76_HW_SCANNING, &phy->mt76->state);
@@ -3130,8 +3146,8 @@ int mt7615_mcu_cancel_hw_scan(struct mt7615_phy *phy,
 ieee80211_scan_completed(phy->mt76->hw, &info);
 }
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_CANCEL_HW_SCAN, &req,
+ sizeof(req), false);
 }
 int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
@@ -3202,8 +3218,8 @@ int mt7615_mcu_sched_scan_req(struct mt7615_phy *phy,
 memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len);
 }
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_CMD_SCHED_SCAN_REQ, false);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_CMD_SCHED_SCAN_REQ,
+ false);
 }
 int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
@@ -3226,8 +3242,8 @@ int mt7615_mcu_sched_scan_enable(struct mt7615_phy *phy,
 else
 clear_bit(MT76_HW_SCHED_SCANNING, &phy->mt76->state);
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE,
- &req, sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SCHED_SCAN_ENABLE, &req,
+ sizeof(req), false);
 }
 static int mt7615_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
@@ -3366,8 +3382,8 @@ again:
 out:
 req.center_freq = cpu_to_le16(center_freq);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req,
- sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RXDCOC_CAL, &req,
+ sizeof(req), true);
 if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
 chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
@@ -3487,8 +3503,8 @@ again:
 out:
 req.center_freq = cpu_to_le16(center_freq);
- ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req,
- sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXDPD_CAL, &req,
+ sizeof(req), true);
 if ((chandef->width == NL80211_CHAN_WIDTH_80P80 ||
 chandef->width == NL80211_CHAN_WIDTH_160) && !req.is_freq2) {
@@ -3528,17 +3544,16 @@ int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 };
 int err;
- if (vif->type != NL80211_IFTYPE_STATION ||
- !mt7615_firmware_offload(dev))
- return -ENOTSUPP;
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return 0;
- err = __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT,
- &req_hdr, sizeof(req_hdr), false);
+ err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_ABORT, &req_hdr,
+ sizeof(req_hdr), false);
 if (err < 0 || !enable)
 return err;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED,
- &req, sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, &req,
+ sizeof(req), false);
 }
 #ifdef CONFIG_PM
@@ -3569,9 +3584,11 @@ int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
 req.hdr.hif_type = 2;
 else if (mt76_is_usb(&dev->mt76))
 req.hdr.hif_type = 1;
+ else if (mt76_is_sdio(&dev->mt76))
+ req.hdr.hif_type = 0;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, &req,
+ sizeof(req), true);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
@@ -3587,6 +3604,7 @@ mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 u8 pad[3];
 } __packed hdr;
 struct mt7615_wow_ctrl_tlv wow_ctrl_tlv;
+ struct mt7615_wow_gpio_param_tlv gpio_tlv;
 } req = {
 .hdr = {
 .bss_idx = mvif->idx,
@@ -3596,6 +3614,11 @@ mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 .len = cpu_to_le16(sizeof(struct mt7615_wow_ctrl_tlv)),
 .cmd = suspend ? 1 : 2,
 },
+ .gpio_tlv = {
+ .tag = cpu_to_le16(UNI_SUSPEND_WOW_GPIO_PARAM),
+ .len = cpu_to_le16(sizeof(struct mt7615_wow_gpio_param_tlv)),
+ .gpio_pin = 0xff, /* follow fw about GPIO pin */
+ },
 };
 if (wowlan->magic_pkt)
@@ -3609,12 +3632,14 @@ mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 }
 if (mt76_is_mmio(&dev->mt76))
- req.wow_ctrl_tlv.wakeup_hif = 2;
+ req.wow_ctrl_tlv.wakeup_hif = WOW_PCIE;
 else if (mt76_is_usb(&dev->mt76))
- req.wow_ctrl_tlv.wakeup_hif = 1;
+ req.wow_ctrl_tlv.wakeup_hif = WOW_USB;
+ else if (mt76_is_sdio(&dev->mt76))
+ req.wow_ctrl_tlv.wakeup_hif = WOW_GPIO;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND, &req,
+ sizeof(req), true);
 }
 static int
@@ -3649,8 +3674,8 @@ mt7615_mcu_set_wow_pattern(struct mt7615_dev *dev,
 memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len);
 memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_SUSPEND, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_SUSPEND,
+ true);
 }
 static int
@@ -3678,8 +3703,8 @@ mt7615_mcu_set_suspend_mode(struct mt7615_dev *dev,
 },
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_SUSPEND, &req,
+ sizeof(req), true);
 }
 static int
@@ -3705,8 +3730,8 @@ mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
 },
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, &req,
+ sizeof(req), true);
 }
 static int
@@ -3731,8 +3756,8 @@ mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 },
 };
- return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, &req,
+ sizeof(req), true);
 }
 void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
@@ -3744,8 +3769,6 @@ void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
 struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config;
 int i;
- mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
-
 mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
 mt7615_mcu_set_arp_filter(phy->dev, vif, suspend);
@@ -3822,8 +3845,8 @@ int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
 memcpy(gtk_tlv->kck, key->kck, NL80211_KCK_LEN);
 memcpy(gtk_tlv->replay_ctr, key->replay_ctr, NL80211_REPLAY_CTR_LEN);
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_OFFLOAD, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
+ true);
 }
 #endif /* CONFIG_PM */
@@ -3843,8 +3866,8 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 phy->roc_grant = false;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
- sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
+ sizeof(req), false);
 }
 int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
@@ -3890,8 +3913,8 @@ int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
 memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
 }
- return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD_OFFLOAD, true);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
+ true);
 }
 int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
@@ -3912,8 +3935,8 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
 if (!mt7615_firmware_offload(dev))
 return -ENOTSUPP;
- return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS,
- &req, sizeof(req), false);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, &req,
+ sizeof(req), false);
 }
 u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
@@ -3925,8 +3948,8 @@ u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
 .addr = cpu_to_le32(offset),
 };
- return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ,
- &req, sizeof(req), true);
+ return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req),
+ true);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr);
@@ -3940,7 +3963,6 @@ void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
 .val = cpu_to_le32(val),
 };
- __mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE,
- &req, sizeof(req), false);
+ mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false);
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
index 7b856e9eee1e..6ef5670211d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -275,6 +275,7 @@ enum {
 MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
 MCU_EXT_CMD_DBDC_CTRL = 0x45,
 MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_MUAR_UPDATE = 0x48,
 MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
 MCU_EXT_CMD_SET_RX_PATH = 0x4e,
 MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
@@ -477,6 +478,12 @@ struct mt7615_bss_qos_tlv {
 u8 pad[3];
 } __packed;
+enum {
+ WOW_USB = 1,
+ WOW_PCIE = 2,
+ WOW_GPIO = 3,
+};
+
 struct mt7615_wow_ctrl_tlv {
 __le16 tag;
 __le16 len;
@@ -501,6 +508,16 @@ struct mt7615_wow_ctrl_tlv {
 u8 rsv[4];
 } __packed;
+struct mt7615_wow_gpio_param_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 gpio_pin;
+ u8 trigger_lvl;
+ u8 pad[2];
+ __le32 gpio_interval;
+ u8 rsv[4];
+} __packed;
+
 #define MT7615_WOW_MASK_MAX_LEN 16
 #define MT7615_WOW_PATTEN_MAX_LEN 128
 struct mt7615_wow_pattern_tlv {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 6de492a4cf02..a7f92fa0488f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -98,9 +98,9 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
 return IRQ_HANDLED;
 }
-static void mt7615_irq_tasklet(unsigned long data)
+static void mt7615_irq_tasklet(struct tasklet_struct *t)
 {
- struct mt7615_dev *dev = (struct mt7615_dev *)data;
+ struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
 u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
 mt76_wr(dev, MT_INT_MASK_CSR, 0);
@@ -203,7 +203,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
 dev = container_of(mdev, struct mt7615_dev, mt76);
 mt76_mmio_init(&dev->mt76, mem_base);
- tasklet_init(&dev->irq_tasklet, mt7615_irq_tasklet, (unsigned long)dev);
+ tasklet_setup(&dev->irq_tasklet, mt7615_irq_tasklet);
 dev->reg_map = map;
 dev->ops = ops;
@@ -240,7 +240,8 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
 return 0;
 error:
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(&dev->mt76);
+
 return ret;
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 6a9f9187f76a..99b8abdbb08f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -11,7 +11,7 @@
 #include "../mt76.h"
 #include "regs.h"
-#define MT7615_MAX_INTERFACES 4
+#define MT7615_MAX_INTERFACES 16
 #define MT7615_MAX_WMM_SETS 4
 #define MT7663_WTBL_SIZE 32
 #define MT7615_WTBL_SIZE 128
@@ -106,29 +106,11 @@ struct mt7615_rate_desc {
 u8 bw;
 };
-enum mt7615_wtbl_desc_type {
- MT7615_WTBL_RATE_DESC,
- MT7615_WTBL_KEY_DESC
-};
-
-struct mt7615_key_desc {
- enum set_key_cmd cmd;
- u32 cipher;
- s8 keyidx;
- u8 keylen;
- u8 *key;
-};
-
-struct mt7615_wtbl_desc {
+struct mt7615_wtbl_rate_desc {
 struct list_head node;
- enum mt7615_wtbl_desc_type type;
+ struct mt7615_rate_desc rate;
 struct mt7615_sta *sta;
-
- union {
- struct mt7615_rate_desc rate;
- struct mt7615_key_desc key;
- };
 };
 struct mt7615_sta {
@@ -175,8 +157,10 @@ struct mt7615_phy {
 struct ieee80211_vif *monitor_vif;
+ u8 n_beacon_vif;
+
 u32 rxfilter;
- u32 omac_mask;
+ u64 omac_mask;
 u16 noise;
@@ -254,7 +238,7 @@ struct mt7615_dev {
 struct tasklet_struct irq_tasklet;
 struct mt7615_phy phy;
- u32 omac_mask;
+ u64 omac_mask;
 u16 chainmask;
@@ -289,20 +273,22 @@ struct mt7615_dev {
 u8 fw_ver;
- struct work_struct wtbl_work;
- struct list_head wd_head;
+ struct work_struct rate_work;
+ struct list_head wrd_head;
 u32 debugfs_rf_wf;
 u32 debugfs_rf_reg;
+ u32 muar_mask;
+
 #ifdef CONFIG_NL80211_TESTMODE
 struct {
 u32 *reg_backup;
 s16 last_freq_offset;
 u8 last_rcpi[4];
- s8 last_ib_rssi;
- s8 last_wb_rssi;
+ s8 last_ib_rssi[4];
+ s8 last_wb_rssi[4];
 } test;
 #endif
@@ -344,24 +330,13 @@ enum {
 HW_BSSID_1,
 HW_BSSID_2,
 HW_BSSID_3,
- HW_BSSID_MAX,
+ HW_BSSID_MAX = HW_BSSID_3,
 EXT_BSSID_START = 0x10,
 EXT_BSSID_1,
- EXT_BSSID_2,
- EXT_BSSID_3,
- EXT_BSSID_4,
- EXT_BSSID_5,
- EXT_BSSID_6,
- EXT_BSSID_7,
- EXT_BSSID_8,
- EXT_BSSID_9,
- EXT_BSSID_10,
- EXT_BSSID_11,
- EXT_BSSID_12,
- EXT_BSSID_13,
- EXT_BSSID_14,
- EXT_BSSID_15,
- EXT_BSSID_END
+ EXT_BSSID_15 = 0x1f,
+ EXT_BSSID_MAX = EXT_BSSID_15,
+ REPEATER_BSSID_START = 0x20,
+ REPEATER_BSSID_MAX = 0x3f,
 };
 enum {
@@ -452,7 +427,6 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
 struct ieee80211_tx_rate *probe_rate,
 struct ieee80211_tx_rate *rates);
-int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable);
 void mt7615_pm_wake_work(struct work_struct *work);
 int mt7615_pm_wake(struct mt7615_dev *dev);
 void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
@@ -542,7 +516,7 @@ static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
 static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
 {
- return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
+ return MT_INT_TX_DONE(dev->mt76.q_mcu[MT_MCUQ_WM]->hw_idx);
 }
 void mt7615_dma_reset(struct mt7615_dev *dev);
@@ -568,28 +542,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
 struct ieee80211_sta *sta, int pid,
 struct ieee80211_key_conf *key, bool beacon);
 void mt7615_mac_set_timing(struct mt7615_phy *phy);
+int __mt7615_mac_wtbl_set_key(struct mt7615_dev *dev,
+ struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
 int mt7615_mac_wtbl_set_key(struct mt7615_dev *dev, struct mt76_wcid *wcid,
 struct ieee80211_key_conf *key,
 enum set_key_cmd cmd);
-int mt7615_mac_wtbl_update_pk(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
- int keyidx, enum set_key_cmd cmd);
-void mt7615_mac_wtbl_update_cipher(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd);
-int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
- struct mt76_wcid *wcid,
- u8 *key, u8 keylen,
- enum mt7615_cipher_type cipher,
- enum set_key_cmd cmd);
 void mt7615_mac_reset_work(struct work_struct *work);
 u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
-int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
-int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
- int len, bool wait_resp);
+int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq);
 u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
 int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
@@ -651,6 +615,9 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
 int mt7615_init_debugfs(struct mt7615_dev *dev);
 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
+int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
+ struct ieee80211_vif *vif,
+ bool enable);
 int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
 bool enable);
 int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
@@ -674,14 +641,13 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
 void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
 struct mt76_queue_entry *e);
-void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
 int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
 int mt7663u_mcu_init(struct mt7615_dev *dev);
 /* sdio */
 u32 mt7663s_read_pcr(struct mt7615_dev *dev);
 int mt7663s_mcu_init(struct mt7615_dev *dev);
-void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_txrx_worker(struct mt76_worker *w);
 void mt7663s_rx_work(struct work_struct *work);
 void mt7663s_sdio_irq(struct sdio_func *func);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 06a0f8f7bc89..27fcb1374685 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -16,8 +16,15 @@ static void mt7615_init_work(struct work_struct *work)
 {
 struct mt7615_dev *dev = container_of(work, struct mt7615_dev,
 mcu_work);
+ int i, ret;
- if (mt7615_mcu_init(dev))
+ ret = mt7615_mcu_init(dev);
+ for (i = 0; (ret == -EAGAIN) && (i < 10); i++) {
+ msleep(200);
+ ret = mt7615_mcu_init(dev);
+ }
+
+ if (ret)
 return;
 mt7615_mcu_set_eeprom(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 61623f480806..6e5db015b32c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -333,6 +333,9 @@ enum mt7615_reg_base {
 #define MT_WF_RFCR_DROP_NDPA BIT(20)
 #define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+#define MT_WF_RMAC_MORE(_band) MT_WF_RMAC((_band) ? 0x124 : 0x024)
+#define MT_WF_RMAC_MORE_MUAR_MODE GENMASK(31, 30)
+
 #define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004)
 #define MT_WF_RFCR1_DROP_ACK BIT(4)
 #define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
@@ -342,6 +345,14 @@ enum mt7615_reg_base {
 #define MT_CHFREQ(_band) MT_WF_RMAC((_band) ?
void mt7615_mac_reset_work(struct work_struct *work); u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid); -int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); -int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, - int len, bool wait_resp); +int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg); int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val); int mt7615_mcu_set_dbdc(struct mt7615_dev *dev); @@ -651,6 +615,9 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, int mt7615_init_debugfs(struct mt7615_dev *dev); int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); +int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy, + struct ieee80211_vif *vif, + bool enable); int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend); @@ -674,14 +641,13 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); -void mt7663_usb_sdio_wtbl_work(struct work_struct *work); int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); int mt7663u_mcu_init(struct mt7615_dev *dev); /* sdio */ u32 mt7663s_read_pcr(struct mt7615_dev *dev); int mt7663s_mcu_init(struct mt7615_dev *dev); -void mt7663s_tx_work(struct work_struct *work); +void mt7663s_txrx_worker(struct mt76_worker *w); void mt7663s_rx_work(struct work_struct *work); void mt7663s_sdio_irq(struct sdio_func *func); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c index 06a0f8f7bc89..27fcb1374685 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -16,8 +16,15 @@ static void mt7615_init_work(struct work_struct *work) { struct mt7615_dev *dev = container_of(work, struct mt7615_dev, mcu_work); + int i, ret; - if (mt7615_mcu_init(dev)) + ret = mt7615_mcu_init(dev); + for (i = 0; (ret == -EAGAIN) && (i < 10); i++) { + msleep(200); + ret = mt7615_mcu_init(dev); + } + + if (ret) return; mt7615_mcu_set_eeprom(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 61623f480806..6e5db015b32c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -333,6 +333,9 @@ enum mt7615_reg_base { #define MT_WF_RFCR_DROP_NDPA BIT(20) #define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) +#define MT_WF_RMAC_MORE(_band) MT_WF_RMAC((_band) ? 0x124 : 0x024) +#define MT_WF_RMAC_MORE_MUAR_MODE GENMASK(31, 30) + #define MT_WF_RFCR1(_band) MT_WF_RMAC((_band) ? 0x104 : 0x004) #define MT_WF_RFCR1_DROP_ACK BIT(4) #define MT_WF_RFCR1_DROP_BF_POLL BIT(5) @@ -342,6 +345,14 @@ enum mt7615_reg_base { #define MT_CHFREQ(_band) MT_WF_RMAC((_band) ? 
0x130 : 0x030) +#define MT_WF_RMAC_MAR0 MT_WF_RMAC(0x025c) +#define MT_WF_RMAC_MAR1 MT_WF_RMAC(0x0260) +#define MT_WF_RMAC_MAR1_ADDR GENMASK(15, 0) +#define MT_WF_RMAC_MAR1_START BIT(16) +#define MT_WF_RMAC_MAR1_WRITE BIT(17) +#define MT_WF_RMAC_MAR1_IDX GENMASK(29, 24) +#define MT_WF_RMAC_MAR1_GROUP GENMASK(31, 30) + #define MT_WF_RMAC_MIB_TIME0 MT_WF_RMAC(0x03c4) #define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31) #define MT_WF_RMAC_MIB_RXTIME_EN BIT(30) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 874c929d8552..347975eaba86 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -294,30 +294,6 @@ release: return ret; } -static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) -{ - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - struct mt76_sdio *sdio = &mdev->sdio; - u32 pse, ple; - int err; - - err = mt7615_mac_sta_add(mdev, vif, sta); - if (err < 0) - return err; - - /* init sched data quota */ - pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); - ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); - - mutex_lock(&sdio->sched.lock); - sdio->sched.pse_data_quota = pse; - sdio->sched.ple_data_quota = ple; - mutex_unlock(&sdio->sched.lock); - - return 0; -} - static int mt7663s_probe(struct sdio_func *func, const struct sdio_device_id *id) { @@ -329,7 +305,7 @@ static int mt7663s_probe(struct sdio_func *func, .tx_status_data = mt7663_usb_sdio_tx_status_data, .rx_skb = mt7615_queue_rx_skb, .sta_ps = mt7615_sta_ps, - .sta_add = mt7663s_sta_add, + .sta_add = mt7615_mac_sta_add, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, }; @@ -366,14 +342,11 @@ static int mt7663s_probe(struct sdio_func *func, ret = mt76s_init(mdev, func, &mt7663s_ops); if (ret < 0) - goto err_free; - - INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work); - INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work); + goto error; ret = mt7663s_hw_init(dev, func); if (ret) - goto err_deinit; + goto error; mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | (mt76_rr(dev, MT_HW_REV) & 0xff); @@ -384,7 +357,7 @@ static int mt7663s_probe(struct sdio_func *func, GFP_KERNEL); if (!mdev->sdio.intr_data) { ret = -ENOMEM; - goto err_deinit; + goto error; } for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) { @@ -393,23 +366,29 @@ static int mt7663s_probe(struct sdio_func *func, GFP_KERNEL); if (!mdev->sdio.xmit_buf[i]) { ret = -ENOMEM; - goto err_deinit; + goto error; } } ret = mt76s_alloc_queues(&dev->mt76); if (ret) - goto err_deinit; + goto error; + + ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker, + mt7663s_txrx_worker, "sdio-txrx"); + if (ret) + goto error; + + sched_set_fifo_low(mdev->sdio.txrx_worker.task); ret = mt7663_usb_sdio_register_device(dev); if (ret) - goto err_deinit; + goto error; return 0; -err_deinit: +error: mt76s_deinit(&dev->mt76); -err_free: mt76_free_device(&dev->mt76); return ret; @@ -432,6 +411,7 @@ static int mt7663s_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct mt7615_dev *mdev = sdio_get_drvdata(func); + int err; if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && mt7615_firmware_offload(mdev)) { @@ -444,9 +424,20 @@ static int mt7663s_suspend(struct device *dev) sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); - mt76s_stop_txrx(&mdev->mt76); + err = mt7615_mcu_set_fw_ctrl(mdev); + if 
(err) + return err; - return mt7615_mcu_set_fw_ctrl(mdev); + mt76_worker_disable(&mdev->mt76.sdio.txrx_worker); + mt76_worker_disable(&mdev->mt76.sdio.status_worker); + mt76_worker_disable(&mdev->mt76.sdio.net_worker); + + cancel_work_sync(&mdev->mt76.sdio.stat_work); + clear_bit(MT76_READING_STATS, &mdev->mphy.state); + + mt76_tx_status_check(&mdev->mt76, NULL, true); + + return 0; } static int mt7663s_resume(struct device *dev) @@ -455,6 +446,10 @@ static int mt7663s_resume(struct device *dev) struct mt7615_dev *mdev = sdio_get_drvdata(func); int err; + mt76_worker_enable(&mdev->mt76.sdio.txrx_worker); + mt76_worker_enable(&mdev->mt76.sdio.status_worker); + mt76_worker_enable(&mdev->mt76.sdio.net_worker); + err = mt7615_mcu_set_drv_ctrl(mdev); if (err) return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c index 38670c00380c..17fe4187d1de 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c @@ -19,46 +19,34 @@ static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) { struct mt76_sdio *sdio = &dev->mt76.sdio; - u32 pse0, ple, pse1, txdwcnt; - - pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); - pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA); - ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + u32 txdwcnt; + + sdio->sched.pse_data_quota = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, + MT_HIF0_MIN_QUOTA); + sdio->sched.pse_mcu_quota = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, + MT_HIF1_MIN_QUOTA); + sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, + MT_HIF0_MIN_QUOTA); txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT, MT_PP_TXDWCNT_TX1_ADD_DW_CNT); - - mutex_lock(&sdio->sched.lock); - - sdio->sched.pse_data_quota = pse0; - sdio->sched.ple_data_quota = ple; - sdio->sched.pse_mcu_quota = pse1; sdio->sched.deficit = txdwcnt << 2; - mutex_unlock(&sdio->sched.lock); - return 0; } static int mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, bool wait_resp) + int cmd, int *seq) { struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - int ret, seq; - - mutex_lock(&mdev->mcu.mutex); + int ret; - mt7615_mcu_fill_msg(dev, skb, cmd, &seq); - ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); + mt7615_mcu_fill_msg(dev, skb, cmd, seq); + ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, 0); if (ret) - goto out; - - mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]); - if (wait_resp) - ret = mt7615_mcu_wait_response(dev, cmd, seq); + return ret; -out: - mutex_unlock(&mdev->mcu.mutex); + mt76_queue_kick(dev, mdev->q_mcu[MT_MCUQ_WM]); return ret; } @@ -127,7 +115,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev) .headroom = sizeof(struct mt7615_mcu_txd), .tailroom = MT_USB_TAIL_SIZE, .mcu_skb_send_msg = mt7663s_mcu_send_message, - .mcu_send_msg = mt7615_mcu_msg_send, + .mcu_parse_response = mt7615_mcu_parse_response, .mcu_restart = mt7615_mcu_restart, .mcu_rr = mt7615_mcu_reg_rr, .mcu_wr = mt7615_mcu_reg_wr, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c index 2486cda3243b..13d77f8fca86 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -46,11 +46,9 @@ static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data) if (!pse_data_quota && !ple_data_quota && 
!pse_mcu_quota) return 0; - mutex_lock(&sdio->sched.lock); sdio->sched.pse_mcu_quota += pse_mcu_quota; sdio->sched.pse_data_quota += pse_data_quota; sdio->sched.ple_data_quota += ple_data_quota; - mutex_unlock(&sdio->sched.lock); return pse_data_quota + ple_data_quota + pse_mcu_quota; } @@ -105,10 +103,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, buf = page_address(page); - sdio_claim_host(sdio->func); err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); - sdio_release_host(sdio->func); - if (err < 0) { dev_err(dev->dev, "sdio read data failed:%d\n", err); __free_pages(page, order); @@ -138,19 +133,52 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, return i; } -static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, - int buf_sz, int *pse_size, int *ple_size) +static int mt7663s_rx_handler(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + struct mt76s_intr *intr = sdio->intr_data; + int nframes = 0, ret; + + ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr)); + if (ret < 0) + return ret; + + trace_dev_irq(dev, intr->isr, 0); + + if (intr->isr & WHIER_RX0_DONE_INT_EN) { + ret = mt7663s_rx_run_queue(dev, 0, intr); + if (ret > 0) { + mt76_worker_schedule(&sdio->net_worker); + nframes += ret; + } + } + + if (intr->isr & WHIER_RX1_DONE_INT_EN) { + ret = mt7663s_rx_run_queue(dev, 1, intr); + if (ret > 0) { + mt76_worker_schedule(&sdio->net_worker); + nframes += ret; + } + } + + nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr); + + return nframes; +} + +static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz, + int *pse_size, int *ple_size) { int pse_sz; pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); - if (qid == MT_TXQ_MCU) { + if (mcu) { if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz) return -EBUSY; } else { if (sdio->sched.pse_data_quota < *pse_size + pse_sz || - sdio->sched.ple_data_quota < *ple_size) + sdio->sched.ple_data_quota < *ple_size + 1) return -EBUSY; *ple_size = *ple_size + 1; @@ -160,17 +188,15 @@ static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, return 0; } -static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid, +static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size, int ple_size) { - mutex_lock(&sdio->sched.lock); - if (qid == MT_TXQ_MCU) { + if (mcu) { sdio->sched.pse_mcu_quota -= pse_size; } else { sdio->sched.pse_data_quota -= pse_size; sdio->sched.ple_data_quota -= ple_size; } - mutex_unlock(&sdio->sched.lock); } static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) @@ -181,22 +207,20 @@ static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len) if (len > sdio->func->cur_blksize) len = roundup(len, sdio->func->cur_blksize); - sdio_claim_host(sdio->func); err = sdio_writesb(sdio->func, MCR_WTDR1, data, len); - sdio_release_host(sdio->func); - if (err) dev_err(dev->dev, "sdio write failed: %d\n", err); return err; } -static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid) +static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q) { - int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; - struct mt76_queue *q = dev->q_tx[qid]; + int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0; + bool mcu = q == dev->q_mcu[MT_MCUQ_WM]; struct mt76_sdio *sdio = &dev->sdio; + qid = mcu ? 
ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid; while (q->first != q->head) { struct mt76_queue_entry *e = &q->entry[q->first]; struct sk_buff *iter; @@ -214,7 +238,7 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid) if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ) break; - if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz, + if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz, &ple_sz)) break; @@ -240,78 +264,44 @@ next: if (err) return err; } - mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz); + mt7663s_tx_update_quota(sdio, mcu, pse_sz, ple_sz); - return nframes; -} - -void mt7663s_tx_work(struct work_struct *work) -{ - struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, - tx.xmit_work); - struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); - int i, nframes = 0; + mt76_worker_schedule(&sdio->status_worker); - for (i = 0; i < MT_TXQ_MCU_WA; i++) { - int ret; - - ret = mt7663s_tx_run_queue(dev, i); - if (ret < 0) - break; - - nframes += ret; - } - if (nframes) - queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); - - queue_work(sdio->txrx_wq, &sdio->tx.status_work); + return nframes; } -void mt7663s_rx_work(struct work_struct *work) +void mt7663s_txrx_worker(struct mt76_worker *w) { - struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, - rx.recv_work); + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + txrx_worker); struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); - struct mt76s_intr *intr = sdio->intr_data; - int nframes = 0, ret; + int i, nframes, ret; /* disable interrupt */ sdio_claim_host(sdio->func); sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL); - ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr)); - sdio_release_host(sdio->func); - - if (ret < 0) - goto out; - trace_dev_irq(dev, intr->isr, 0); + do { + nframes = 0; - if (intr->isr & WHIER_RX0_DONE_INT_EN) { - ret = mt7663s_rx_run_queue(dev, 0, intr); - if (ret > 0) { - queue_work(sdio->txrx_wq, &sdio->rx.net_work); - nframes += ret; + /* tx */ + for (i = 0; i <= MT_TXQ_PSD; i++) { + ret = mt7663s_tx_run_queue(dev, dev->phy.q_tx[i]); + if (ret > 0) + nframes += ret; } - } - - if (intr->isr & WHIER_RX1_DONE_INT_EN) { - ret = mt7663s_rx_run_queue(dev, 1, intr); - if (ret > 0) { - queue_work(sdio->txrx_wq, &sdio->rx.net_work); + ret = mt7663s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]); + if (ret > 0) nframes += ret; - } - } - if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr)) - queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); + /* rx */ + ret = mt7663s_rx_handler(dev); + if (ret > 0) + nframes += ret; + } while (nframes > 0); - if (nframes) { - queue_work(sdio->txrx_wq, &sdio->rx.recv_work); - return; - } -out: /* enable interrupt */ - sdio_claim_host(sdio->func); sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL); sdio_release_host(sdio->func); } @@ -324,5 +314,5 @@ void mt7663s_sdio_irq(struct sdio_func *func) if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) return; - queue_work(sdio->txrx_wq, &sdio->rx.recv_work); + mt76_worker_schedule(&sdio->txrx_worker); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c index e4dc62314bae..8fc97a52411a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c @@ -90,8 +90,8 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy) data[ret - MT_EE_NIC_CONF_0] = tx_power[i]; } - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - 
MCU_EXT_CMD_SET_TX_POWER_CTRL, false); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_SET_TX_POWER_CTRL, false); } static void @@ -335,9 +335,7 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg) if (!rx) return -ENOMEM; - if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) || - nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) || - nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi)) + if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset)) return -ENOMEM; rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI); @@ -350,6 +348,26 @@ mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg) nla_nest_end(msg, rssi); + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_IB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_ib_rssi); i++) + if (nla_put_s8(msg, i, dev->test.last_ib_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_WB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_wb_rssi); i++) + if (nla_put_s8(msg, i, dev->test.last_wb_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + nla_nest_end(msg, rx); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index f0ad83af9e00..a60cfa345521 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -126,21 +126,20 @@ static int mt7663u_probe(struct usb_interface *usb_intf, alloc_queues: ret = mt76u_alloc_mcu_queue(&dev->mt76); if (ret) - goto error_free_q; + goto error; ret = mt76u_alloc_queues(&dev->mt76); if (ret) - goto error_free_q; + goto error; ret = mt7663_usb_sdio_register_device(dev); if (ret) - goto error_free_q; + goto error; return 0; -error_free_q: - mt76u_queues_deinit(&dev->mt76); error: + mt76u_queues_deinit(&dev->mt76); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c index 4d8be366af31..c55698f9c49a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -15,14 +15,12 @@ static int mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, bool wait_resp) + int cmd, int *seq) { struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - int ret, seq, ep, len, pad; + int ret, ep, len, pad; - mutex_lock(&mdev->mcu.mutex); - - mt7615_mcu_fill_msg(dev, skb, cmd, &seq); + mt7615_mcu_fill_msg(dev, skb, cmd, seq); if (cmd != MCU_CMD_FW_SCATTER) ep = MT_EP_OUT_INBAND_CMD; else @@ -37,14 +35,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, 1000, ep); - if (ret < 0) - goto out; - - if (wait_resp) - ret = mt7615_mcu_wait_response(dev, cmd, seq); out: - mutex_unlock(&mdev->mcu.mutex); dev_kfree_skb(skb); return ret; @@ -56,7 +48,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev) .headroom = MT_USB_HDR_SIZE + sizeof(struct mt7615_mcu_txd), .tailroom = MT_USB_TAIL_SIZE, .mcu_skb_send_msg = mt7663u_mcu_send_message, - .mcu_send_msg = mt7615_mcu_msg_send, + .mcu_parse_response = mt7615_mcu_parse_response, .mcu_restart = mt7615_mcu_restart, }; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c 
b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 3b29a6d3dc64..203256862dfd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -61,12 +61,11 @@ mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, skb_push(skb, MT_USB_TXD_SIZE); } -static int -mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, - struct mt7615_wtbl_desc *wd) +static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, + struct mt7615_wtbl_rate_desc *wrd) { - struct mt7615_rate_desc *rate = &wd->rate; - struct mt7615_sta *sta = wd->sta; + struct mt7615_rate_desc *rate = &wrd->rate; + struct mt7615_sta *sta = wrd->sta; u32 w5, w27, addr, val; lockdep_assert_held(&dev->mt76.mutex); @@ -132,86 +131,30 @@ mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, return 0; } -static int -mt7663_usb_sdio_set_key(struct mt7615_dev *dev, - struct mt7615_wtbl_desc *wd) +static void mt7663_usb_sdio_rate_work(struct work_struct *work) { - struct mt7615_key_desc *key = &wd->key; - struct mt7615_sta *sta = wd->sta; - enum mt7615_cipher_type cipher; - struct mt76_wcid *wcid; - int err; - - lockdep_assert_held(&dev->mt76.mutex); - - if (!sta) { - err = -EINVAL; - goto out; - } - - cipher = mt7615_mac_get_cipher(key->cipher); - if (cipher == MT_CIPHER_NONE) { - err = -EOPNOTSUPP; - goto out; - } - - wcid = &wd->sta->wcid; - - mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); - err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, - cipher, key->cmd); - if (err < 0) - goto out; - - err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, - key->cmd); - if (err < 0) - goto out; - - if (key->cmd == SET_KEY) - wcid->cipher |= BIT(cipher); - else - wcid->cipher &= ~BIT(cipher); -out: - kfree(key->key); - - return err; -} - -void mt7663_usb_sdio_wtbl_work(struct work_struct *work) -{ - struct mt7615_wtbl_desc *wd, *wd_next; - struct list_head wd_list; + struct mt7615_wtbl_rate_desc *wrd, *wrd_next; + struct list_head wrd_list; struct mt7615_dev *dev; dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, - wtbl_work); + rate_work); - INIT_LIST_HEAD(&wd_list); + INIT_LIST_HEAD(&wrd_list); spin_lock_bh(&dev->mt76.lock); - list_splice_init(&dev->wd_head, &wd_list); + list_splice_init(&dev->wrd_head, &wrd_list); spin_unlock_bh(&dev->mt76.lock); - list_for_each_entry_safe(wd, wd_next, &wd_list, node) { - list_del(&wd->node); + list_for_each_entry_safe(wrd, wrd_next, &wrd_list, node) { + list_del(&wrd->node); mt7615_mutex_acquire(dev); - - switch (wd->type) { - case MT7615_WTBL_RATE_DESC: - mt7663_usb_sdio_set_rates(dev, wd); - break; - case MT7615_WTBL_KEY_DESC: - mt7663_usb_sdio_set_key(dev, wd); - break; - } - + mt7663_usb_sdio_set_rates(dev, wrd); mt7615_mutex_release(dev); - kfree(wd); + kfree(wrd); } } -EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work); bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) { @@ -357,8 +300,8 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev) struct ieee80211_hw *hw = mt76_hw(dev); int err; - INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work); - INIT_LIST_HEAD(&dev->wd_head); + INIT_WORK(&dev->rate_work, mt7663_usb_sdio_rate_work); + INIT_LIST_HEAD(&dev->wrd_head); mt7615_init_device(dev); err = mt7663_usb_sdio_init_hardware(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 9087607b621e..dd66fd12a2e6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -52,15 +52,15 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev) mt76x02_eeprom_parse_hw_cap(dev); dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n", - dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz); + dev->mphy.cap.has_2ghz, dev->mphy.cap.has_5ghz); if (dev->no_2ghz) { - dev->mt76.cap.has_2ghz = false; + dev->mphy.cap.has_2ghz = false; dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); } if (is_mt7630(dev)) { - dev->mt76.cap.has_5ghz = false; + dev->mphy.cap.has_5ghz = false; dev_dbg(dev->mt76.dev, "mask out 5GHz support\n"); } @@ -342,10 +342,10 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n", version, fae); - memcpy(dev->mt76.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + memcpy(dev->mphy.macaddr, (u8 *)dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN); - mt76_eeprom_override(&dev->mt76); - mt76x02_mac_setaddr(dev, dev->mt76.macaddr); + mt76_eeprom_override(&dev->mphy); + mt76x02_mac_setaddr(dev, dev->mphy.macaddr); mt76x0_set_chip_cap(dev); mt76x0_set_freq_offset(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index d78866bf41ba..0bac39bf3b66 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -245,7 +245,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev) if (ret) return ret; - if (dev->mt76.cap.has_5ghz) { + if (dev->mphy.cap.has_5ghz) { struct ieee80211_supported_band *sband; sband = &dev->mphy.sband_5g.sband; @@ -253,7 +253,7 @@ int mt76x0_register_device(struct mt76x02_dev *dev) mt76x0_init_txpower(dev, sband); } - if (dev->mt76.cap.has_2ghz) + if (dev->mphy.cap.has_2ghz) mt76x0_init_txpower(dev, &dev->mphy.sband_2g.sband); mt76x02_init_debugfs(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index dda11c704aba..b87d8e136cb9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -194,7 +194,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error: - ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(&dev->mt76); + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c index 007c762c6db1..f0962507f72f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c @@ -117,6 +117,7 @@ int mt76x0e_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x0e_mcu_ops = { .mcu_send_msg = mt76x02_mcu_msg_send, + .mcu_parse_response = mt76x02_mcu_parse_response, }; int err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 3de33aadf794..e91c314cdfac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -447,11 +447,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev) else coex3 |= BIT(4); coex3 |= BIT(3); - if (dev->mt76.cap.has_2ghz) + if (dev->mphy.cap.has_2ghz) wlan |= BIT(6); } else { /* single antenna mode */ - if (dev->mt76.cap.has_5ghz) { + if (dev->mphy.cap.has_5ghz) { coex3 |= BIT(3) | BIT(4); } else { wlan |= BIT(6); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index ce6b286a8152..b12cb17cb43d
100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -277,6 +277,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); + mt76u_queues_deinit(&dev->mt76); mt76_free_device(&dev->mt76); return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index b29cd39dc176..a601350531cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -609,10 +609,11 @@ static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev) } } -static void mt76x02_dfs_tasklet(unsigned long arg) +static void mt76x02_dfs_tasklet(struct tasklet_struct *t) { - struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; - struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + struct mt76x02_dfs_pattern_detector *dfs_pd = from_tasklet(dfs_pd, t, + dfs_tasklet); + struct mt76x02_dev *dev = container_of(dfs_pd, typeof(*dev), dfs_pd); u32 engine_mask; int i; @@ -860,8 +861,7 @@ void mt76x02_dfs_init_detector(struct mt76x02_dev *dev) INIT_LIST_HEAD(&dfs_pd->seq_pool); dev->mt76.region = NL80211_DFS_UNSET; dfs_pd->last_sw_check = jiffies; - tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet, - (unsigned long)dev); + tasklet_setup(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet); } static void diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index c54c50fd639a..0acabba2d1a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -75,14 +75,14 @@ void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev) switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) { case BOARD_TYPE_5GHZ: - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_5ghz = true; break; case BOARD_TYPE_2GHZ: - dev->mt76.cap.has_2ghz = true; + dev->mphy.cap.has_2ghz = true; break; default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + dev->mphy.cap.has_2ghz = true; + dev->mphy.cap.has_5ghz = true; break; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index da6d3f51f6d4..16b40a73fd1f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -727,24 +727,24 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr) static const u8 null_addr[ETH_ALEN] = {}; int i; - ether_addr_copy(dev->mt76.macaddr, addr); + ether_addr_copy(dev->mphy.macaddr, addr); - if (!is_valid_ether_addr(dev->mt76.macaddr)) { - eth_random_addr(dev->mt76.macaddr); + if (!is_valid_ether_addr(dev->mphy.macaddr)) { + eth_random_addr(dev->mphy.macaddr); dev_info(dev->mt76.dev, "Invalid MAC address, using random address %pM\n", - dev->mt76.macaddr); + dev->mphy.macaddr); } - mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr)); + mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mphy.macaddr)); mt76_wr(dev, MT_MAC_ADDR_DW1, - get_unaligned_le16(dev->mt76.macaddr + 4) | + get_unaligned_le16(dev->mphy.macaddr + 4) | FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); mt76_wr(dev, MT_MAC_BSSID_DW0, - get_unaligned_le32(dev->mt76.macaddr)); + get_unaligned_le32(dev->mphy.macaddr)); mt76_wr(dev, MT_MAC_BSSID_DW1, - get_unaligned_le16(dev->mt76.macaddr + 4) | + get_unaligned_le16(dev->mphy.macaddr + 4) | 
FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); /* enable 7 additional beacon slots and control them with bypass mask */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 267058086a90..4aa5c36afeaf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -10,6 +10,28 @@ #include "mt76x02_mcu.h" +int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + u32 *rxfce; + + if (!skb) { + dev_err(mdev->dev, + "MCU message %d (seq %d) timed out\n", cmd, + seq); + dev->mcu_timeout = 1; + return -ETIMEDOUT; + } + + rxfce = (u32 *)skb->cb; + if (seq != FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) + return -EAGAIN; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_mcu_parse_response); + int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp) { @@ -39,31 +61,15 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | FIELD_PREP(MT_MCU_MSG_LEN, skb->len); - ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info); + ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[MT_MCUQ_WM], skb, tx_info); if (ret) goto out; while (wait_resp) { - u32 *rxfce; - bool check_seq = false; - skb = mt76_mcu_get_response(&dev->mt76, expires); - if (!skb) { - dev_err(mdev->dev, - "MCU message %d (seq %d) timed out\n", cmd, - seq); - ret = -ETIMEDOUT; - dev->mcu_timeout = 1; - break; - } - - rxfce = (u32 *)skb->cb; - - if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce)) - check_seq = true; - + ret = mt76x02_mcu_parse_response(mdev, cmd, skb, seq); dev_kfree_skb(skb); - if (check_seq) + if (ret != -EAGAIN) break; } @@ -89,7 +95,8 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, if (func != Q_SELECT) wait = true; - return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait); + return mt76_mcu_send_msg(&dev->mt76, CMD_FUN_SET_OP, &msg, + sizeof(msg), wait); } EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); @@ -103,8 +110,8 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on) .level = cpu_to_le32(0), }; - return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), - false); + return mt76_mcu_send_msg(&dev->mt76, CMD_POWER_SAVING_OP, &msg, + sizeof(msg), false); } EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); @@ -123,8 +130,8 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param) if (is_mt76x2e) mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); - ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg), - true); + ret = mt76_mcu_send_msg(&dev->mt76, CMD_CALIBRATION_OP, &msg, + sizeof(msg), true); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index 5fba1266c648..e187ed52968e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -89,6 +89,8 @@ int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param); int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp); +int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq); int mt76x02_mcu_function_select(struct 
mt76x02_dev *dev, enum mcu_function func, u32 val); int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index cf68731bd094..e7e87311d355 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -11,10 +11,11 @@ #include "mt76x02_mcu.h" #include "trace.h" -static void mt76x02_pre_tbtt_tasklet(unsigned long arg) +static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t) { - struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; - struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD]; + struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet); + struct mt76_dev *mdev = &dev->mt76; + struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD]; struct beacon_bc_data data = {}; struct sk_buff *skb; int i; @@ -35,9 +36,9 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg) mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~(0xff00 >> dev->beacon_data_count)); - mt76_csa_check(&dev->mt76); + mt76_csa_check(mdev); - if (dev->mt76.csa_complete) + if (mdev->csa_complete) return; mt76x02_enqueue_buffered_bc(dev, &data, 8); @@ -58,8 +59,7 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg) struct ieee80211_vif *vif = info->control.vif; struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; - mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid, - NULL); + mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL); } spin_unlock_bh(&q->lock); } @@ -104,27 +104,6 @@ void mt76x02e_init_beacon_config(struct mt76x02_dev *dev) EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config); static int -mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc) -{ - struct mt76_queue *hwq; - int err; - - hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); - if (!hwq) - return -ENOMEM; - - err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE); - if (err < 0) - return err; - - dev->mt76.q_tx[qid] = hwq; - - mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx)); - - return 0; -} - -static int mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, int idx, int n_desc, int bufsize) { @@ -169,14 +148,16 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget) mt76x02_mac_poll_tx_status(dev, false); - for (i = MT_TXQ_MCU; i >= 0; i--) - mt76_queue_tx_cleanup(dev, i, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + for (i = MT_TXQ_PSD; i >= 0; i--) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); if (napi_complete_done(napi, 0)) mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL); - for (i = MT_TXQ_MCU; i >= 0; i--) - mt76_queue_tx_cleanup(dev, i, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + for (i = MT_TXQ_PSD; i >= 0; i--) + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false); mt76_worker_schedule(&dev->mt76.tx_worker); @@ -198,8 +179,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) return -ENOMEM; dev->mt76.tx_worker.fn = mt76x02_tx_worker; - tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet, - (unsigned long)dev); + tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet); spin_lock_init(&dev->txstatus_fifo_lock); kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); @@ -209,22 +189,31 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i), - 
MT76x02_TX_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i), + MT76x02_TX_RING_SIZE, + MT_TX_RING_BASE); if (ret) return ret; } - ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD, - MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE); + ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT, + MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; - ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU, - MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU, + MT_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; + mt76x02_irq_enable(dev, + MT_INT_TX_DONE(IEEE80211_AC_VO) | + MT_INT_TX_DONE(IEEE80211_AC_VI) | + MT_INT_TX_DONE(IEEE80211_AC_BE) | + MT_INT_TX_DONE(IEEE80211_AC_BK) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) | + MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU)); + ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); if (ret) @@ -292,7 +281,7 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) if (dev->mt76.csa_complete) mt76_csa_finish(&dev->mt76); else - mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]); + mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]); } if (intr & MT_INT_TX_STAT) @@ -357,7 +346,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev) int i; for (i = 0; i < 4; i++) { - q = dev->mt76.q_tx[i]; + q = dev->mphy.q_tx[i]; if (!q->queued) continue; @@ -475,8 +464,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) if (restart) mt76_mcu_restart(dev); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true); for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, i, true); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 2c2f56112b57..efd70ddc2fd1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, struct mt76_tx_info *tx_info) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); - int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx); + int pid, len = tx_info->skb->len, ep = q2ep(dev->mphy.q_tx[qid]->hw_idx); struct mt76x02_txwi *txwi; bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU; enum mt76_qsel qsel; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index e43d13d7c988..2953df7d8388 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -297,6 +297,7 @@ void mt76x02u_init_mcu(struct mt76_dev *dev) .headroom = MT_CMD_HDR_LEN, .tailroom = 8, .mcu_send_msg = mt76x02u_mcu_send_msg, + .mcu_parse_response = mt76x02_mcu_parse_response, .mcu_wr_rp = mt76x02u_mcu_wr_rp, .mcu_rd_rp = mt76x02u_mcu_rd_rp, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 11b769af2f8f..7ac20d3c16d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -186,6 +186,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev) ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, 
NEEDS_UNIQUE_STA_ADDR); dev->mt76.global_wcid.idx = 255; dev->mt76.global_wcid.hw_key_idx = -1; @@ -304,12 +305,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) /* Allow to change address in HW if we create first interface. */ if (!dev->mphy.vif_mask && - (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || - memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) + (((vif->addr[0] ^ dev->mphy.macaddr[0]) & ~GENMASK(4, 1)) || + memcmp(vif->addr + 1, dev->mphy.macaddr + 1, ETH_ALEN - 1))) mt76x02_mac_setaddr(dev, vif->addr); if (vif->addr[0] & BIT(1)) - idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); + idx = 1 + (((dev->mphy.macaddr[0] ^ vif->addr[0]) >> 2) & 7); /* * Client mode typically only has one configurable BSSID register, @@ -487,7 +488,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 cw_min = 5, cw_max = 10, qid; u32 val; - qid = dev->mt76.q_tx[queue]->hw_idx; + qid = dev->mphy.q_tx[queue]->hw_idx; if (params->cw_min) cw_min = fls(params->cw_min); @@ -621,7 +622,7 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; int idx = msta->wcid.idx; - mt76_stop_tx_queues(&dev->mt76, sta, true); + mt76_stop_tx_queues(&dev->mphy, sta, true); if (mt76_is_mmio(mdev)) mt76x02_mac_wcid_set_drop(dev, idx, ps); } @@ -677,7 +678,7 @@ void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev) for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { u8 *addr = dev->macaddr_list[i].addr; - memcpy(addr, dev->mt76.macaddr, ETH_ALEN); + memcpy(addr, dev->mphy.macaddr, ETH_ALEN); if (!i) continue; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index 410ffce3baff..c57e05a5c65e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -16,7 +16,7 @@ mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev) { void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR; - memcpy(dev->mt76.macaddr, src, ETH_ALEN); + memcpy(dev->mphy.macaddr, src, ETH_ALEN); return 0; } @@ -502,8 +502,8 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev) mt76x02_eeprom_parse_hw_cap(dev); mt76x2_eeprom_get_macaddr(dev); - mt76_eeprom_override(&dev->mt76); - dev->mt76.macaddr[0] &= ~BIT(1); + mt76_eeprom_override(&dev->mphy); + dev->mphy.macaddr[0] &= ~BIT(1); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c index 9635c04ce032..3c2738903d7d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c @@ -33,13 +33,14 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, }; /* first set the channel without the extension channel info */ - mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true); + mt76_mcu_send_msg(&dev->mt76, CMD_SWITCH_CHANNEL_OP, &msg, + sizeof(msg), true); usleep_range(5000, 10000); msg.ext_chan = 0xe0 + bw_index; - return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), - true); + return mt76_mcu_send_msg(&dev->mt76, CMD_SWITCH_CHANNEL_OP, &msg, + sizeof(msg), true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel); @@ -66,7 +67,8 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, msg.cfg = cpu_to_le32(val); /* first set the channel without the extension channel info */ - return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), 
true); + return mt76_mcu_send_msg(&dev->mt76, CMD_LOAD_CR, &msg, sizeof(msg), + true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr); @@ -84,8 +86,8 @@ int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, if (force) msg.channel |= cpu_to_le32(BIT(31)); - return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg), - true); + return mt76_mcu_send_msg(&dev->mt76, CMD_INIT_GAIN_OP, &msg, + sizeof(msg), true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain); @@ -100,7 +102,7 @@ int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, .data = *tssi_data, }; - return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg), - true); + return mt76_mcu_send_msg(&dev->mt76, CMD_CALIBRATION_OP, &msg, + sizeof(msg), true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 4d50dad29ddf..ecaf85b483ac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -90,7 +90,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; error: - ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(&dev->mt76); + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 48a3ebc9892a..620484390418 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -69,7 +69,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev) int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { - const u8 *macaddr = dev->mt76.macaddr; + const u8 *macaddr = dev->mphy.macaddr; u32 val; int i, k; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index ca6f968411ac..e5b6282d1a6c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -179,6 +179,7 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev) static const struct mt76_mcu_ops mt76x2_mcu_ops = { .mcu_restart = mt76pci_mcu_restart, .mcu_send_msg = mt76x02_mcu_msg_send, + .mcu_parse_response = mt76x02_mcu_parse_response, }; int ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 4e003c7b62cf..2575369e44e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -75,6 +75,7 @@ static int mt76x2u_probe(struct usb_interface *intf, return 0; err: + mt76u_queues_deinit(&dev->mt76); mt76_free_device(&dev->mt76); usb_set_intfdata(intf, NULL); usb_put_dev(udev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile index 57fe726cc38b..cc2054dffa98 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_MT7915E) += mt7915e.o mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \ debugfs.o + +mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 1049927faf24..7d810fbf2862 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -48,32 +48,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL, mt7915_radar_trigger, "%lld\n"); static int 
-mt7915_dbdc_set(void *data, u64 val) -{ - struct mt7915_dev *dev = data; - - if (val) - mt7915_register_ext_phy(dev); - else - mt7915_unregister_ext_phy(dev); - - return 0; -} - -static int -mt7915_dbdc_get(void *data, u64 *val) -{ - struct mt7915_dev *dev = data; - - *val = !!mt7915_ext_phy(dev); - - return 0; -} - -DEFINE_DEBUGFS_ATTRIBUTE(fops_dbdc, mt7915_dbdc_get, - mt7915_dbdc_set, "%lld\n"); - -static int mt7915_fw_debug_set(void *data, u64 val) { struct mt7915_dev *dev = data; @@ -233,6 +207,7 @@ static const struct file_operations fops_tx_stats = { .read = seq_read, .llseek = seq_lseek, .release = single_release, + .owner = THIS_MODULE, }; static int mt7915_read_temperature(struct seq_file *s, void *data) @@ -279,19 +254,23 @@ static int mt7915_queues_read(struct seq_file *s, void *data) { struct mt7915_dev *dev = dev_get_drvdata(s->private); - static const struct { + struct mt76_phy *mphy_ext = dev->mt76.phy2; + struct mt76_queue *ext_q = mphy_ext ? mphy_ext->q_tx[MT_TXQ_BE] : NULL; + struct { + struct mt76_queue *q; char *queue; - int id; } queue_map[] = { - { "WFDMA0", MT_TXQ_BE }, - { "MCUWM", MT_TXQ_MCU }, - { "MCUWA", MT_TXQ_MCU_WA }, - { "MCUFWQ", MT_TXQ_FWDL }, + { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, + { ext_q, "WFDMA1" }, + { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" }, + { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" }, + { dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" }, + { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" }, }; int i; for (i = 0; i < ARRAY_SIZE(queue_map); i++) { - struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id]; + struct mt76_queue *q = queue_map[i].q; if (!q) continue; @@ -375,7 +354,6 @@ int mt7915_init_debugfs(struct mt7915_dev *dev) debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, mt7915_queues_acq); debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats); - debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); /* test knobs */ @@ -460,6 +438,7 @@ static const struct file_operations fops_sta_stats = { .read = seq_read, .llseek = seq_lseek, .release = single_release, + .owner = THIS_MODULE, }; void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index cfa12c4c671f..8c1f9c77b14f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -5,42 +5,16 @@ #include "../dma.h" #include "mac.h" -static int -mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc) +int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc) { - struct mt76_queue *hwq; - int err, i; + int i, err; - hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); - if (!hwq) - return -ENOMEM; - - err = mt76_queue_alloc(dev, hwq, MT7915_TXQ_BAND0, n_desc, 0, - MT_TX_RING_BASE); - if (err < 0) - return err; - - for (i = 0; i < MT_TXQ_MCU; i++) - dev->mt76.q_tx[i] = hwq; - - return 0; -} - -static int -mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc) -{ - struct mt76_queue *hwq; - int err; - - hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL); - if (!hwq) - return -ENOMEM; - - err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE); + err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE); if (err < 0) return err; - dev->mt76.q_tx[qid] = hwq; + for (i = 0; i <= MT_TXQ_PSD; i++) + 
phy->mt76->q_tx[i] = phy->mt76->q_tx[0]; return 0; } @@ -61,6 +35,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, case PKT_TYPE_RX_EVENT: mt7915_mcu_rx_event(dev, skb); break; +#ifdef CONFIG_NL80211_TESTMODE + case PKT_TYPE_TXRXV: + mt7915_mac_fill_rx_vector(dev, skb); + break; +#endif case PKT_TYPE_NORMAL: if (!mt7915_mac_fill_rx(dev, skb)) { mt76_rx(&dev->mt76, q, skb); @@ -76,8 +55,8 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, static void mt7915_tx_cleanup(struct mt7915_dev *dev) { - mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); - mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false); } static int mt7915_poll_tx(struct napi_struct *napi, int budget) @@ -257,25 +236,26 @@ int mt7915_dma_init(struct mt7915_dev *dev) mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0); /* init tx queue */ - ret = mt7915_init_tx_queues(dev, MT7915_TX_RING_SIZE); + ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0, + MT7915_TX_RING_SIZE); if (ret) return ret; /* command to WM */ - ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM, - MT7915_TX_MCU_RING_SIZE); + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM, + MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; /* command to WA */ - ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA, - MT7915_TX_MCU_RING_SIZE); + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA, + MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; /* firmware download */ - ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL, - MT7915_TX_FWDL_RING_SIZE); + ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL, + MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE); if (ret) return ret; @@ -293,13 +273,21 @@ int mt7915_dma_init(struct mt7915_dev *dev) if (ret) return ret; - /* rx data */ - ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0, - MT7915_RX_RING_SIZE, rx_buf_size, - MT_RX_DATA_RING_BASE); + /* rx data queue */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], + MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE, + rx_buf_size, MT_RX_DATA_RING_BASE); if (ret) return ret; + if (dev->dbdc_support) { + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT], + MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE, + rx_buf_size, MT_RX_DATA_RING_BASE); + if (ret) + return ret; + } + ret = mt76_init_queues(dev); if (ret < 0) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c index 7deba7ebd68a..7a2be3f61398 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c @@ -4,16 +4,11 @@ #include "mt7915.h" #include "eeprom.h" -static inline bool mt7915_efuse_valid(u8 val) -{ - return !(val == 0xff); -} - -u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset) +static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset) { u8 *data = dev->mt76.eeprom.data; - if (!mt7915_efuse_valid(data[offset])) + if (data[offset] == 0xff) mt7915_mcu_get_eeprom(dev, offset); return data[offset]; @@ -34,10 +29,10 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev) static int mt7915_check_eeprom(struct mt7915_dev *dev) { - u16 val; u8 *eeprom = dev->mt76.eeprom.data; + u16 val; - mt7915_eeprom_read(dev, 0); + mt7915_eeprom_read(dev, MT_EE_CHIP_ID); val = get_unaligned_le16(eeprom); switch 
(val) { @@ -48,35 +43,50 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev) } } -static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev) +void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy) { - u8 *eeprom = dev->mt76.eeprom.data; - u8 tx_mask, max_nss = 4; - u32 val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF); + struct mt7915_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + u32 val; + val = mt7915_eeprom_read(dev, MT_EE_WIFI_CONF + ext_phy); val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val); switch (val) { case MT_EE_5GHZ: - dev->mt76.cap.has_5ghz = true; + phy->mt76->cap.has_5ghz = true; break; case MT_EE_2GHZ: - dev->mt76.cap.has_2ghz = true; + phy->mt76->cap.has_2ghz = true; break; default: - dev->mt76.cap.has_2ghz = true; - dev->mt76.cap.has_5ghz = true; + phy->mt76->cap.has_2ghz = true; + phy->mt76->cap.has_5ghz = true; break; } +} + +static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev) +{ + u8 nss, tx_mask[2] = {}, *eeprom = dev->mt76.eeprom.data; + + mt7915_eeprom_parse_band_config(&dev->phy); /* read tx mask from eeprom */ - tx_mask = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK, - eeprom[MT_EE_WIFI_CONF]); - if (!tx_mask || tx_mask > max_nss) - tx_mask = max_nss; - - dev->chainmask = BIT(tx_mask) - 1; - dev->mphy.antenna_mask = dev->chainmask; - dev->phy.chainmask = dev->chainmask; + tx_mask[0] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK, + eeprom[MT_EE_WIFI_CONF]); + if (dev->dbdc_support) + tx_mask[1] = FIELD_GET(MT_EE_WIFI_CONF_TX_MASK, + eeprom[MT_EE_WIFI_CONF + 1]); + + nss = tx_mask[0] + tx_mask[1]; + if (!nss || nss > 4) { + tx_mask[0] = 4; + nss = 4; + } + + dev->chainmask = BIT(nss) - 1; + dev->mphy.antenna_mask = BIT(tx_mask[0]) - 1; + dev->phy.chainmask = dev->mphy.antenna_mask; } int mt7915_eeprom_init(struct mt7915_dev *dev) @@ -92,10 +102,10 @@ int mt7915_eeprom_init(struct mt7915_dev *dev) return ret; mt7915_eeprom_parse_hw_cap(dev); - memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, ETH_ALEN); - mt76_eeprom_override(&dev->mt76); + mt76_eeprom_override(&dev->mphy); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h index 4e31d6ab4fa6..6712032b40df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h @@ -15,6 +15,7 @@ enum mt7915_eeprom_field { MT_EE_CHIP_ID = 0x000, MT_EE_VERSION = 0x002, MT_EE_MAC_ADDR = 0x004, + MT_EE_MAC_ADDR2 = 0x00a, MT_EE_DDIE_FT_VERSION = 0x050, MT_EE_WIFI_CONF = 0x190, MT_EE_TX0_POWER_2G = 0x2fc, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index 0232b66acb4f..ed4635bd151a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -6,6 +6,113 @@ #include "mac.h" #include "eeprom.h" +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ +} + +static struct ieee80211_rate mt7915_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(11, 60), + OFDM_RATE(15, 90), + OFDM_RATE(10, 120), + OFDM_RATE(14, 180), + 
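/*
 * [Worked example, not part of the patch] The DBDC-aware
 * mt7915_eeprom_parse_hw_cap() above sums the per-band chain counts into
 * one device-wide chainmask, while band 0 keeps only its own chains.
 * With a 2x2 + 2x2 split:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int tx_mask[2] = { 2, 2 };	/* per-band chains from EEPROM */
	unsigned int nss = tx_mask[0] + tx_mask[1];
	uint32_t chainmask, band0_mask;

	if (!nss || nss > 4) {			/* bogus EEPROM: assume 4x4 */
		tx_mask[0] = 4;
		nss = 4;
	}
	chainmask = (1u << nss) - 1;		/* BIT(nss) - 1       -> 0xf */
	band0_mask = (1u << tx_mask[0]) - 1;	/* BIT(tx_mask[0]) - 1 -> 0x3 */
	printf("chainmask=%#x band0 antenna_mask=%#x\n", chainmask, band0_mask);
	return 0;
}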
OFDM_RATE(9, 240), + OFDM_RATE(13, 360), + OFDM_RATE(8, 480), + OFDM_RATE(12, 540), +}; + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = 16, + .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) +#endif + }, { + .max = MT7915_MAX_INTERFACES, + .types = BIT(NL80211_IFTYPE_STATION) + } +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = MT7915_MAX_INTERFACES, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160) | + BIT(NL80211_CHAN_WIDTH_80P80), + } +}; + +static void +mt7915_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct mt7915_phy *phy = mphy->priv; + struct cfg80211_chan_def *chandef = &mphy->chandef; + + dev->mt76.region = request->dfs_region; + + if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) + return; + + mt7915_dfs_init_radar_detector(phy); +} + +static void +mt7915_init_wiphy(struct ieee80211_hw *hw) +{ + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct wiphy *wiphy = hw->wiphy; + + hw->queues = 4; + hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; + + phy->slottime = 9; + + hw->sta_data_size = sizeof(struct mt7915_sta); + hw->vif_data_size = sizeof(struct mt7915_vif); + + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + wiphy->reg_notifier = mt7915_regd_notifier; + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + ieee80211_hw_set(hw, HAS_RATE_CONTROL); + ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); + + hw->max_tx_fragments = 4; +} + static void mt7915_mac_init_band(struct mt7915_dev *dev, u8 band) { @@ -35,25 +142,26 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band) mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN); + + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536); + /* disable rx rate report by default due to hw issues */ + mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN); } static void mt7915_mac_init(struct mt7915_dev *dev) { int i; - mt76_rmw_field(dev, MT_DMA_DCR0, MT_DMA_DCR0_MAX_RX_LEN, 1536); mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536); - /* enable rx rate report */ - mt76_set(dev, MT_DMA_DCR0, MT_DMA_DCR0_RXD_G5_EN); /* disable hardware de-agg */ mt76_clear(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN); for (i = 0; i < MT7915_WTBL_SIZE; i++) mt7915_mac_wtbl_update(dev, i, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + for (i = 0; i < 2; i++) + mt7915_mac_init_band(dev, i); - mt7915_mac_init_band(dev, 0); - mt7915_mac_init_band(dev, 1); mt7915_mcu_set_rts_thresh(&dev->phy, 0x92b); } @@ -108,6 +216,60 @@ static void mt7915_init_txpower(struct mt7915_dev *dev) mt7915_eeprom_init_sku(dev); } +static int mt7915_register_ext_phy(struct mt7915_dev *dev) +{ + struct mt7915_phy *phy = mt7915_ext_phy(dev); + struct mt76_phy *mphy; + 
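/*
 * [Illustrative sketch, not part of the patch] The CCK_RATE()/OFDM_RATE()
 * entries in mt7915_rates above pack the PHY mode into bits 15:8 of
 * hw_value and the hardware rate index into bits 7:0 (short-preamble CCK
 * uses index + 4).  Decoding is a pair of shifts:
 */
#include <stdint.h>

static inline unsigned int rate_hw_mode(uint16_t hw_value)
{
	return hw_value >> 8;		/* MT_PHY_TYPE_CCK, _OFDM, ... */
}

static inline unsigned int rate_hw_idx(uint16_t hw_value)
{
	return hw_value & 0xff;		/* index within that PHY mode */
}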
int ret; + + if (!dev->dbdc_support) + return 0; + + if (phy) + return 0; + + mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops); + if (!mphy) + return -ENOMEM; + + phy = mphy->priv; + phy->dev = dev; + phy->mt76 = mphy; + phy->chainmask = dev->chainmask & ~dev->phy.chainmask; + mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1; + mt7915_init_wiphy(mphy->hw); + + INIT_LIST_HEAD(&phy->stats_list); + INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work); + + mt7915_eeprom_parse_band_config(phy); + mt7915_set_stream_vht_txbf_caps(phy); + mt7915_set_stream_he_caps(phy); + + memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2, + ETH_ALEN); + mt76_eeprom_override(mphy); + + /* The second interface does not get any packets unless it has a vif */ + ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); + + ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1, + MT7915_TX_RING_SIZE); + if (ret) + goto error; + + ret = mt76_register_phy(mphy, true, mt7915_rates, + ARRAY_SIZE(mt7915_rates)); + if (ret) + goto error; + + return 0; + +error: + ieee80211_free_hw(mphy->hw); + return ret; +} + static void mt7915_init_work(struct work_struct *work) { struct mt7915_dev *dev = container_of(work, struct mt7915_dev, @@ -117,6 +279,7 @@ static void mt7915_init_work(struct work_struct *work) mt7915_mac_init(dev); mt7915_init_txpower(dev); mt7915_txbf_init(dev); + mt7915_register_ext_phy(dev); } static int mt7915_init_hardware(struct mt7915_dev *dev) @@ -129,6 +292,8 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) spin_lock_init(&dev->token_lock); idr_init(&dev->token); + dev->dbdc_support = !!(mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5)); + ret = mt7915_dma_init(dev); if (ret) return ret; @@ -162,109 +327,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev) return 0; } -#define CCK_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ - .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \ - .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \ -} - -#define OFDM_RATE(_idx, _rate) { \ - .bitrate = _rate, \ - .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ - .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ -} - -static struct ieee80211_rate mt7915_rates[] = { - CCK_RATE(0, 10), - CCK_RATE(1, 20), - CCK_RATE(2, 55), - CCK_RATE(3, 110), - OFDM_RATE(11, 60), - OFDM_RATE(15, 90), - OFDM_RATE(10, 120), - OFDM_RATE(14, 180), - OFDM_RATE(9, 240), - OFDM_RATE(13, 360), - OFDM_RATE(8, 480), - OFDM_RATE(12, 540), -}; - -static const struct ieee80211_iface_limit if_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_ADHOC) - }, { - .max = MT7915_MAX_INTERFACES, - .types = BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_STATION) - } -}; - -static const struct ieee80211_iface_combination if_comb[] = { - { - .limits = if_limits, - .n_limits = ARRAY_SIZE(if_limits), - .max_interfaces = 4, - .num_different_channels = 1, - .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160) | - BIT(NL80211_CHAN_WIDTH_80P80), - } -}; - -static void -mt7915_regd_notifier(struct wiphy *wiphy, - struct regulatory_request *request) -{ - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct mt7915_dev *dev = mt7915_hw_dev(hw); - struct mt76_phy *mphy = hw->priv; - struct mt7915_phy *phy = mphy->priv; - struct cfg80211_chan_def *chandef = 
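/*
 * [Worked example, not part of the patch] mt7915_register_ext_phy()
 * above hands the second band whatever chains band 0 did not claim and
 * renumbers them into a dense antenna mask.  With dev->chainmask = 0xf
 * and dev->phy.chainmask = 0x3:
 */
#include <stdint.h>

static uint32_t ext_chainmask(uint32_t dev_mask, uint32_t band0_mask)
{
	return dev_mask & ~band0_mask;		/* 0xf & ~0x3 = 0xc */
}

static uint32_t ext_antenna_mask(uint32_t chainmask)
{
	/* BIT(hweight8(chainmask)) - 1: popcount(0xc) = 2 -> 0x3 */
	return (1u << __builtin_popcount(chainmask)) - 1;
}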
&mphy->chandef; - - dev->mt76.region = request->dfs_region; - - if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) - return; - - mt7915_dfs_init_radar_detector(phy); -} - -static void -mt7915_init_wiphy(struct ieee80211_hw *hw) -{ - struct mt7915_phy *phy = mt7915_hw_phy(hw); - struct wiphy *wiphy = hw->wiphy; - - hw->queues = 4; - hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; - hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; - - phy->slottime = 9; - - hw->sta_data_size = sizeof(struct mt7915_sta); - hw->vif_data_size = sizeof(struct mt7915_vif); - - wiphy->iface_combinations = if_comb; - wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - wiphy->reg_notifier = mt7915_regd_notifier; - wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; - - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); - - ieee80211_hw_set(hw, HAS_RATE_CONTROL); - - hw->max_tx_fragments = 4; -} - void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy) { int nss = hweight8(phy->chainmask); @@ -342,7 +404,7 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; /* num_snd_dim */ - c = (nss - 1) | (max_t(int, mcs->tx_mcs_160, 1) << 3); + c = (nss - 1) | (max_t(int, le16_to_cpu(mcs->tx_mcs_160), 1) << 3); elem->phy_cap_info[5] |= c; c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | @@ -354,35 +416,24 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap, } static void -mt7915_gen_ppe_thresh(u8 *he_ppet) +mt7915_gen_ppe_thresh(u8 *he_ppet, int nss) { - int ru, nss, max_nss = 1, max_ru = 3; - u8 bit = 7, ru_bit_mask = 0x7; + u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* HE80 */ u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; - he_ppet[0] = max_nss & IEEE80211_PPE_THRES_NSS_MASK; - he_ppet[0] |= (ru_bit_mask << - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & - IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; - - for (nss = 0; nss <= max_nss; nss++) { - for (ru = 0; ru < max_ru; ru++) { - u8 val; - int i; - - if (!(ru_bit_mask & BIT(ru))) - continue; - - val = (ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & - 0x3f; - val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); - for (i = 5; i >= 0; i--) { - he_ppet[bit / 8] |= - ((val >> i) & 0x1) << ((bit % 8)); - bit++; - } - } - } + he_ppet[0] = FIELD_PREP(IEEE80211_PPE_THRES_NSS_MASK, nss - 1) | + FIELD_PREP(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, + ru_bit_mask); + + ppet_bits = IEEE80211_PPE_THRES_INFO_PPET_SIZE * + nss * hweight8(ru_bit_mask) * 2; + ppet_size = DIV_ROUND_UP(ppet_bits, 8); + + for (i = 0; i < ppet_size - 1; i++) + he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3]; + + he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3] & + (0xff >> (8 - (ppet_bits - 1) % 8)); } static int @@ -513,7 +564,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); if (he_cap_elem->phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { - mt7915_gen_ppe_thresh(he_cap->ppe_thres); + mt7915_gen_ppe_thresh(he_cap->ppe_thres, nss); } else { he_cap_elem->phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US; @@ -528,10 +579,9 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) { struct ieee80211_sband_iftype_data *data; struct ieee80211_supported_band *band; - struct mt76_dev *mdev = &phy->dev->mt76; int n; - if (mdev->cap.has_2ghz) { + if (phy->mt76->cap.has_2ghz) { data = phy->iftype[NL80211_BAND_2GHZ]; n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data); @@ -540,7 +590,7 @@ void 
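/*
 * [Sizing sketch, not part of the patch] The rewritten
 * mt7915_gen_ppe_thresh() above emits one 3-bit PPET16/PPET8 pair
 * (IEEE80211_PPE_THRES_INFO_PPET_SIZE = 3) per NSS per RU in the index
 * bitmask.  For nss = 4 and ru_bit_mask = 0x7 that is
 * 3 * 4 * 3 * 2 = 72 bits -> 9 bytes, which the loop fills with the
 * repeating 0x1c/0xc7/0x71 pattern, masking the trailing partial byte.
 */
#include <stdint.h>

static unsigned int ppet_bytes(unsigned int nss, uint8_t ru_bit_mask)
{
	unsigned int bits = 3 * nss * __builtin_popcount(ru_bit_mask) * 2;

	return (bits + 7) / 8;			/* DIV_ROUND_UP(bits, 8) */
}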
mt7915_set_stream_he_caps(struct mt7915_phy *phy) band->n_iftype_data = n; } - if (mdev->cap.has_5ghz) { + if (phy->mt76->cap.has_5ghz) { data = phy->iftype[NL80211_BAND_5GHZ]; n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data); @@ -550,95 +600,7 @@ void mt7915_set_stream_he_caps(struct mt7915_phy *phy) } } -static void -mt7915_cap_dbdc_enable(struct mt7915_dev *dev) -{ - dev->mphy.sband_5g.sband.vht_cap.cap &= - ~(IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ); - - if (dev->chainmask == 0xf) - dev->mphy.antenna_mask = dev->chainmask >> 2; - else - dev->mphy.antenna_mask = dev->chainmask >> 1; - - dev->phy.chainmask = dev->mphy.antenna_mask; - dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask; - dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask; - - mt76_set_stream_caps(&dev->mphy, true); - mt7915_set_stream_vht_txbf_caps(&dev->phy); - mt7915_set_stream_he_caps(&dev->phy); -} - -static void -mt7915_cap_dbdc_disable(struct mt7915_dev *dev) -{ - dev->mphy.sband_5g.sband.vht_cap.cap |= - IEEE80211_VHT_CAP_SHORT_GI_160 | - IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; - - dev->mphy.antenna_mask = dev->chainmask; - dev->phy.chainmask = dev->chainmask; - dev->mphy.hw->wiphy->available_antennas_rx = dev->chainmask; - dev->mphy.hw->wiphy->available_antennas_tx = dev->chainmask; - - mt76_set_stream_caps(&dev->mphy, true); - mt7915_set_stream_vht_txbf_caps(&dev->phy); - mt7915_set_stream_he_caps(&dev->phy); -} - -int mt7915_register_ext_phy(struct mt7915_dev *dev) -{ - struct mt7915_phy *phy = mt7915_ext_phy(dev); - struct mt76_phy *mphy; - int ret; - bool bound; - - /* TODO: enble DBDC */ - bound = mt7915_l1_rr(dev, MT_HW_BOUND) & BIT(5); - if (!bound) - return -EINVAL; - - if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) - return -EINVAL; - - if (phy) - return 0; - - mt7915_cap_dbdc_enable(dev); - mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops); - if (!mphy) - return -ENOMEM; - - phy = mphy->priv; - phy->dev = dev; - phy->mt76 = mphy; - phy->chainmask = dev->chainmask & ~dev->phy.chainmask; - mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1; - mt7915_init_wiphy(mphy->hw); - - INIT_LIST_HEAD(&phy->stats_list); - INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work); - - /* - * Make the secondary PHY MAC address local without overlapping with - * the usual MAC address allocation scheme on multiple virtual interfaces - */ - mphy->hw->wiphy->perm_addr[0] |= 2; - mphy->hw->wiphy->perm_addr[0] ^= BIT(7); - - /* The second interface does not get any packets unless it has a vif */ - ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); - - ret = mt76_register_phy(mphy); - if (ret) - ieee80211_free_hw(mphy->hw); - - return ret; -} - -void mt7915_unregister_ext_phy(struct mt7915_dev *dev) +static void mt7915_unregister_ext_phy(struct mt7915_dev *dev) { struct mt7915_phy *phy = mt7915_ext_phy(dev); struct mt76_phy *mphy = dev->mt76.phy2; @@ -646,7 +608,6 @@ void mt7915_unregister_ext_phy(struct mt7915_dev *dev) if (!phy) return; - mt7915_cap_dbdc_disable(dev); mt76_unregister_phy(mphy); ieee80211_free_hw(mphy->hw); } @@ -683,9 +644,22 @@ int mt7915_register_device(struct mt7915_dev *dev) dev->mphy.sband_5g.sband.vht_cap.cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; - mt7915_cap_dbdc_disable(dev); + if (!dev->dbdc_support) + dev->mphy.sband_5g.sband.vht_cap.cap |= + IEEE80211_VHT_CAP_SHORT_GI_160 | + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ; + 
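/*
 * [Note on the removed code, not part of the patch] The dropped
 * perm_addr fixup derived a locally-administered MAC for the second phy
 * from the primary address; it is superseded by reading MT_EE_MAC_ADDR2
 * from the EEPROM (with mt76_eeprom_override() as the fallback).  The
 * old derivation, for reference:
 */
#include <stdint.h>

static void derive_local_addr(uint8_t addr[6])
{
	addr[0] |= 0x02;	/* mark locally administered */
	addr[0] ^= 0x80;	/* flip BIT(7) to dodge the vif MAC scheme */
}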
dev->mphy.hw->wiphy->available_antennas_rx = dev->phy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->phy.chainmask; + + mt76_set_stream_caps(&dev->mphy, true); + mt7915_set_stream_vht_txbf_caps(&dev->phy); + mt7915_set_stream_he_caps(&dev->phy); dev->phy.dfs_state = -1; +#ifdef CONFIG_NL80211_TESTMODE + dev->mt76.test_ops = &mt7915_testmode_ops; +#endif + ret = mt76_register_device(&dev->mt76, true, mt7915_rates, ARRAY_SIZE(mt7915_rates)); if (ret) @@ -716,6 +690,7 @@ void mt7915_unregister_device(struct mt7915_dev *dev) ieee80211_free_txskb(hw, txwi->skb); } mt76_put_txwi(&dev->mt76, txwi); + dev->token_count--; } spin_unlock_bh(&dev->token_lock); idr_destroy(&dev->token); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 6f159d99a596..f504eeb221f9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -562,21 +562,271 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) return 0; } +#ifdef CONFIG_NL80211_TESTMODE +void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb) +{ + __le32 *rxd = (__le32 *)skb->data; + __le32 *rxv = rxd + 4; + u32 rcpi, ib_rssi, wb_rssi, v20, v21; + s32 foe; + u8 snr; + int i; + + rcpi = le32_to_cpu(rxv[6]); + ib_rssi = le32_to_cpu(rxv[7]); + wb_rssi = le32_to_cpu(rxv[8]) >> 5; + + for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) { + if (i == 3) + wb_rssi = le32_to_cpu(rxv[9]); + + dev->test.last_rcpi[i] = rcpi & 0xff; + dev->test.last_ib_rssi[i] = ib_rssi & 0xff; + dev->test.last_wb_rssi[i] = wb_rssi & 0xff; + } + + v20 = le32_to_cpu(rxv[20]); + v21 = le32_to_cpu(rxv[21]); + + foe = FIELD_GET(MT_CRXV_FOE_LO, v20) | + (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT); + + snr = FIELD_GET(MT_CRXV_SNR, v20) - 16; + + dev->test.last_freq_offset = foe; + dev->test.last_snr = snr; + + dev_kfree_skb(skb); +} +#endif + +static void +mt7915_mac_write_txwi_tm(struct mt7915_dev *dev, struct mt76_phy *mphy, + __le32 *txwi, struct sk_buff *skb) +{ +#ifdef CONFIG_NL80211_TESTMODE + struct mt76_testmode_data *td = &dev->mt76.test; + u8 rate_idx = td->tx_rate_idx; + u8 nss = td->tx_rate_nss; + u8 bw, mode; + u16 rateval = 0; + u32 val; + + if (skb != dev->mt76.test.tx_skb) + return; + + switch (td->tx_rate_mode) { + case MT76_TM_TX_MODE_CCK: + mode = MT_PHY_TYPE_CCK; + break; + case MT76_TM_TX_MODE_HT: + nss = 1 + (rate_idx >> 3); + mode = MT_PHY_TYPE_HT; + break; + case MT76_TM_TX_MODE_VHT: + mode = MT_PHY_TYPE_VHT; + break; + case MT76_TM_TX_MODE_HE_SU: + mode = MT_PHY_TYPE_HE_SU; + break; + case MT76_TM_TX_MODE_HE_EXT_SU: + mode = MT_PHY_TYPE_HE_EXT_SU; + break; + case MT76_TM_TX_MODE_HE_TB: + mode = MT_PHY_TYPE_HE_TB; + break; + case MT76_TM_TX_MODE_HE_MU: + mode = MT_PHY_TYPE_HE_MU; + break; + case MT76_TM_TX_MODE_OFDM: + default: + mode = MT_PHY_TYPE_OFDM; + break; + } + + switch (mphy->chandef.width) { + case NL80211_CHAN_WIDTH_40: + bw = 1; + break; + case NL80211_CHAN_WIDTH_80: + bw = 2; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + bw = 3; + break; + default: + bw = 0; + break; + } + + if (td->tx_rate_stbc && nss == 1) { + nss++; + rateval |= MT_TX_RATE_STBC; + } + + rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | + FIELD_PREP(MT_TX_RATE_MODE, mode) | + FIELD_PREP(MT_TX_RATE_NSS, nss - 1); + + txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); + + le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT); + if (td->tx_rate_mode < MT76_TM_TX_MODE_HT) 
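/*
 * [Illustrative sketch, not part of the patch] The frequency-offset
 * estimate parsed above is split across two RX-vector words: bits 31:19
 * of v20 carry the low 13 bits and bits 6:0 of v21 the high 7 bits (see
 * the MT_CRXV_FOE_* masks added in mac.h below).  Plain-C equivalent of
 * the FIELD_GET() arithmetic; as in the driver, no explicit sign
 * extension of the 20-bit value is applied:
 */
#include <stdint.h>

static int32_t crxv_foe(uint32_t v20, uint32_t v21)
{
	uint32_t lo = (v20 >> 19) & 0x1fff;	/* MT_CRXV_FOE_LO: GENMASK(31, 19) */
	uint32_t hi = v21 & 0x7f;		/* MT_CRXV_FOE_HI: GENMASK(6, 0)   */

	return (int32_t)(lo | (hi << 13));	/* MT_CRXV_FOE_SHIFT */
}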
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); + + val = MT_TXD6_FIXED_BW | + FIELD_PREP(MT_TXD6_BW, bw) | + FIELD_PREP(MT_TXD6_TX_RATE, rateval) | + FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi); + + /* for HE_SU/HE_EXT_SU PPDU + * - 1x, 2x, 4x LTF + 0.8us GI + * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI + * for HE_MU PPDU + * - 2x, 4x LTF + 0.8us GI + * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI + * for HE_TB PPDU + * - 1x, 2x LTF + 1.6us GI + * - 4x LTF + 3.2us GI + */ + if (mode >= MT_PHY_TYPE_HE_SU) + val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); + + if (td->tx_rate_ldpc) + val |= MT_TXD6_LDPC; + + txwi[6] |= cpu_to_le32(val); + txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, + dev->test.spe_idx)); +#endif +} + +static void +mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid) +{ + + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + u8 fc_type, fc_stype; + bool wmm = false; + u32 val; + + if (wcid->sta) { + struct ieee80211_sta *sta; + + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + wmm = sta->wme; + } + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) | + FIELD_PREP(MT_TXD1_TID, tid); + + if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN) + val |= MT_TXD1_ETH_802_3; + + txwi[1] |= cpu_to_le32(val); + + fc_type = IEEE80211_FTYPE_DATA >> 2; + fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype); + + txwi[2] |= cpu_to_le32(val); + + val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); + txwi[7] |= cpu_to_le32(val); +} + +static void +mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct ieee80211_key_conf *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool multicast = is_multicast_ether_addr(hdr->addr1); + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + __le16 fc = hdr->frame_control; + u8 fc_type, fc_stype; + u32 val; + + if (ieee80211_is_action(fc) && + mgmt->u.action.category == WLAN_CATEGORY_BACK && + mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { + u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + + txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA); + tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK; + } else if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr; + u16 control = le16_to_cpu(bar->control); + + tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control); + } + + val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + FIELD_PREP(MT_TXD1_TID, tid); + txwi[1] |= cpu_to_le32(val); + + fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; + fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | + FIELD_PREP(MT_TXD2_MULTICAST, multicast); + + if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { + val |= MT_TXD2_BIP; + txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME); + } + + if (!ieee80211_is_data(fc) || multicast) + val |= MT_TXD2_FIX_RATE; + + txwi[2] |= cpu_to_le32(val); + + if (ieee80211_is_beacon(fc)) { + txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT); + 
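/*
 * [Illustrative sketch, not part of the patch] The fixed-rate word built
 * by mt7915_mac_write_txwi_tm() above, expanded against the reworked
 * mac.h masks (IDX bits 3:0, MODE bits 9:6, NSS bits 12:10, STBC bit
 * 13): STBC on a single-stream rate claims a second chain and sets the
 * flag.
 */
#include <stdint.h>

static uint16_t tm_rateval(uint8_t idx, uint8_t mode, uint8_t nss, int stbc)
{
	uint16_t val = 0;

	if (stbc && nss == 1) {			/* STBC needs a second stream */
		nss++;
		val |= 1u << 13;		/* MT_TX_RATE_STBC */
	}
	val |= (idx & 0xf)			/* MT_TX_RATE_IDX,  bits 3:0  */
	     | ((uint16_t)(mode & 0xf) << 6)	/* MT_TX_RATE_MODE, bits 9:6  */
	     | ((uint16_t)((nss - 1) & 0x7) << 10); /* MT_TX_RATE_NSS, 12:10 */
	return val;
}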
txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT); + } + + if (info->flags & IEEE80211_TX_CTL_INJECTED) { + u16 seqno = le16_to_cpu(hdr->seq_ctrl); + + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = le16_to_cpu(bar->start_seq_num); + } + + val = MT_TXD3_SN_VALID | + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); + txwi[3] |= cpu_to_le32(val); + } + + val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | + FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); + txwi[7] |= cpu_to_le32(val); +} + void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, bool beacon) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; - bool multicast = is_multicast_ether_addr(hdr->addr1); struct ieee80211_vif *vif = info->control.vif; struct mt76_phy *mphy = &dev->mphy; bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; - u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; - __le16 fc = hdr->frame_control; - u16 tx_count = 15, seqno = 0; - u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; + bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + u16 tx_count = 15; u32 val; if (vif) { @@ -589,13 +839,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, if (ext_phy && dev->mt76.phy2) mphy = dev->mt76.phy2; - fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; - fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - - txwi[4] = 0; - txwi[5] = 0; - txwi[6] = 0; - if (beacon) { p_fmt = MT_TX_TYPE_FW; q_idx = MT_LMAC_BCN0; @@ -608,20 +851,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); } - if (ieee80211_is_action(fc) && - mgmt->u.action.category == WLAN_CATEGORY_BACK && - mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) { - u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); - - txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA); - tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK; - } else if (ieee80211_is_back_req(hdr->frame_control)) { - struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr; - u16 control = le16_to_cpu(bar->control); - - tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control); - } - val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) | FIELD_PREP(MT_TXD0_Q_IDX, q_idx); @@ -629,10 +858,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, val = MT_TXD1_LONG_FORMAT | FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) | - FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | - FIELD_PREP(MT_TXD1_HDR_INFO, - ieee80211_get_hdrlen_from_skb(skb) / 2) | - FIELD_PREP(MT_TXD1_TID, tid) | FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) @@ -640,27 +865,31 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, txwi[1] = cpu_to_le32(val); - val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) | - FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) | - FIELD_PREP(MT_TXD2_MULTICAST, multicast); - if (key) { - if (multicast && ieee80211_is_robust_mgmt_frame(skb) && - key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) { - val |= MT_TXD2_BIP; - txwi[3] = 0; - } else { - txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME); - } - } else { - txwi[3] = 0; - } - txwi[2] = 
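/*
 * [Illustrative sketch, not part of the patch] For injected frames the
 * code above takes the sequence number from the frame itself (or from a
 * BAR's start_seq_num) instead of letting hardware assign one;
 * IEEE80211_SEQ_TO_SN() just strips the 4-bit fragment number from the
 * sequence-control field:
 */
#include <stdint.h>

static inline uint16_t seq_to_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl & 0xfff0) >> 4;	/* IEEE80211_SEQ_TO_SN() */
}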
cpu_to_le32(val); + txwi[2] = 0; + + val = MT_TXD3_SW_POWER_MGMT | + FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); + if (key) + val |= MT_TXD3_PROTECT_FRAME; + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + val |= MT_TXD3_NO_ACK; - if (!ieee80211_is_data(fc) || multicast) { + txwi[3] = cpu_to_le32(val); + txwi[4] = 0; + txwi[5] = 0; + txwi[6] = 0; + txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0; + + if (is_8023) + mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid); + else + mt7915_mac_write_txwi_80211(dev, txwi, skb, key); + + if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) { u16 rate; /* hardware won't add HTC for mgmt/ctrl frame */ - txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE | MT_TXD2_HTC_VLD); + txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD); if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) rate = MT7915_5G_RATE_DEFAULT; @@ -673,35 +902,28 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE); } - if (!ieee80211_is_beacon(fc)) - txwi[3] |= cpu_to_le32(MT_TXD3_SW_POWER_MGMT); - else - tx_count = 0x1f; - - if (info->flags & IEEE80211_TX_CTL_NO_ACK) - txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK); - - val = FIELD_PREP(MT_TXD7_TYPE, fc_type) | - FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype); - if (wcid->amsdu) - val |= MT_TXD7_HW_AMSDU; - txwi[7] = cpu_to_le32(val); - - val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); - if (info->flags & IEEE80211_TX_CTL_INJECTED) { - seqno = le16_to_cpu(hdr->seq_ctrl); + if (mt76_testmode_enabled(&dev->mt76)) + mt7915_mac_write_txwi_tm(dev, mphy, txwi, skb); +} - if (ieee80211_is_back_req(hdr->frame_control)) { - struct ieee80211_bar *bar; +static void +mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked) +{ + struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2; + struct mt76_queue *q, *q2 = NULL; - bar = (struct ieee80211_bar *)skb->data; - seqno = le16_to_cpu(bar->start_seq_num); - } + q = mphy->q_tx[0]; + if (blocked == q->blocked) + return; - val |= MT_TXD3_SN_VALID | - FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); + q->blocked = blocked; + if (mphy2) { + q2 = mphy2->q_tx[0]; + q2->blocked = blocked; } - txwi[3] |= cpu_to_le32(val); + + if (!blocked) + mt76_worker_schedule(&dev->mt76.tx_worker); } int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, @@ -723,11 +945,11 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (!wcid) wcid = &dev->mt76.global_wcid; - cb->wcid = wcid->idx; - mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, false); + cb->wcid = wcid->idx; + txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE); for (i = 0; i < nbuf; i++) { txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr); @@ -740,12 +962,13 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, tx_info->buf[1].skip_unmap = true; tx_info->nbuf = MT_CT_DMA_BUF_NUM; - txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD); + txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST); if (!key) txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME); - if (ieee80211_is_mgmt(hdr->frame_control)) + if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && + ieee80211_is_mgmt(hdr->frame_control)) txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME); if (vif) { @@ -759,12 +982,21 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, spin_lock_bh(&dev->token_lock); id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC); + if (id >= 0) + dev->token_count++; + + if (dev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR) + 
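/*
 * [Stripped-down model, not part of the patch] The token bookkeeping
 * added here in mt7915_tx_prepare_skb() is a watermark scheme: every
 * queued MSDU takes an idr token, transmission is blocked once fewer
 * than MT7915_TOKEN_FREE_THR tokens remain, and the tx-free path
 * unblocks as soon as the count drops back under the mark (locking and
 * the idr itself omitted here):
 */
struct token_pool {
	unsigned int count, size, free_thr;
	int blocked;
};

static void token_get(struct token_pool *p)
{
	p->count++;
	if (p->count >= p->size - p->free_thr)
		p->blocked = 1;		/* mt7915_set_tx_blocked(dev, true) */
}

static void token_put(struct token_pool *p)
{
	p->count--;
	if (p->blocked && p->count < p->size - p->free_thr)
		p->blocked = 0;		/* unblock and kick the tx_worker */
}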
mt7915_set_tx_blocked(dev, true); spin_unlock_bh(&dev->token_lock); + if (id < 0) return id; txp->token = cpu_to_le16(id); - txp->rept_wds_wcid = 0xff; + if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) + txp->rept_wds_wcid = cpu_to_le16(wcid->idx); + else + txp->rept_wds_wcid = cpu_to_le16(0x3ff); tx_info->skb = DMA_DUMMY_DATA; return 0; @@ -795,17 +1027,19 @@ mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) ieee80211_start_tx_ba_session(sta, tid, 0); } -static inline void -mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw, - struct ieee80211_tx_info *info, struct sk_buff *skb) +static void +mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, + struct ieee80211_sta *sta, u8 stat, + struct list_head *free_list) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_status status = { .sta = sta, .info = info, + .skb = skb, + .free_list = free_list, }; - - if (skb) - status.skb = skb; + struct ieee80211_hw *hw; if (sta) { struct mt7915_sta *msta; @@ -814,18 +1048,19 @@ mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw, status.rate = &msta->stats.tx_rate; } - /* use status_ext to report HE rate */ - ieee80211_tx_status_ext(hw, &status); -} + hw = mt76_tx_status_get_hw(mdev, skb); -static void -mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, - struct ieee80211_sta *sta, u8 stat) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hw *hw; +#ifdef CONFIG_NL80211_TESTMODE + if (skb == mdev->test.tx_skb) { + struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct ieee80211_vif *vif = phy->monitor_vif; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; - hw = mt76_tx_status_get_hw(mdev, skb); + mt76_tx_complete_skb(mdev, mvif->sta.wcid.idx, skb); + + return; + } +#endif if (info->flags & IEEE80211_TX_CTL_AMPDU) info->flags |= IEEE80211_TX_STAT_AMPDU; @@ -837,16 +1072,7 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, info->flags |= IEEE80211_TX_STAT_ACK; info->status.tx_time = 0; - - if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { - mt7915_tx_status(sta, hw, info, skb); - return; - } - - if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK)) - mt7915_tx_status(sta, hw, info, NULL); - - ieee80211_free_txskb(hw, skb); + ieee80211_tx_status_ext(hw, &status); } void mt7915_txp_skb_unmap(struct mt76_dev *dev, @@ -865,13 +1091,21 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) { struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data; struct mt76_dev *mdev = &dev->mt76; + struct mt76_phy *mphy_ext = mdev->phy2; struct mt76_txwi_cache *txwi; struct ieee80211_sta *sta = NULL; + LIST_HEAD(free_list); + struct sk_buff *tmp; u8 i, count; + bool wake = false; /* clean DMA queues and unmap buffers first */ - mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); - mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false); + if (mphy_ext) { + mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false); + mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false); + } /* * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE, @@ -908,6 +1142,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) if (list_empty(&msta->poll_list)) list_add_tail(&msta->poll_list, &dev->sta_poll_list); spin_unlock_bh(&dev->sta_poll_lock); + continue; } msdu = 
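/*
 * [Illustrative sketch, not part of the patch] Passing a free_list in
 * struct ieee80211_tx_status above makes ieee80211_tx_status_ext() chain
 * completed skbs instead of freeing them one at a time; the caller then
 * drains the whole batch with napi_consume_skb(), which can recycle
 * buffers into the per-cpu cache (see mt7915_mac_tx_free() below).  The
 * defer-then-drain shape, on a plain singly linked list:
 */
struct node {
	struct node *next;
};

static void drain(struct node **list, void (*consume)(struct node *))
{
	struct node *n, *next;

	for (n = *list; n; n = next) {
		next = n->next;		/* _safe variant: read before freeing */
		consume(n);
	}
	*list = NULL;
}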
FIELD_GET(MT_TX_FREE_MSDU_ID, info); @@ -915,6 +1150,11 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) spin_lock_bh(&dev->token_lock); txwi = idr_remove(&dev->token, msdu); + if (txwi) + dev->token_count--; + if (dev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR && + dev->mphy.q_tx[0]->blocked) + wake = true; spin_unlock_bh(&dev->token_lock); if (!txwi) @@ -937,16 +1177,29 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb) atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); } - mt7915_tx_complete_status(mdev, txwi->skb, sta, stat); + mt7915_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list); txwi->skb = NULL; } mt76_put_txwi(mdev, txwi); } - dev_kfree_skb(skb); mt7915_mac_sta_poll(dev); + + if (wake) { + spin_lock_bh(&dev->token_lock); + mt7915_set_tx_blocked(dev, false); + spin_unlock_bh(&dev->token_lock); + } + mt76_worker_schedule(&dev->mt76.tx_worker); + + napi_consume_skb(skb, 1); + + list_for_each_entry_safe(skb, tmp, &free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } } void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) @@ -979,7 +1232,8 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]); - mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0); + mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0, + NULL); } } @@ -1081,17 +1335,48 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy) MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); } -/* - * TODO: mib counters are read-clear and there're many HE functionalities need - * such info, hence firmware prepares a task to read the fields out to a shared - * structure. User should switch to use event format to avoid race condition. 
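/*
 * [Illustrative sketch, not part of the patch] The non-AQL packet
 * counter above is cleared with atomic_cmpxchg() rather than a plain
 * store: the reset only happens if the counter still holds the value
 * just sampled, so a concurrent increment from the tx path is never
 * lost.  C11 equivalent:
 */
#include <stdatomic.h>

static void reset_if_unchanged(atomic_int *ctr)
{
	int seen = atomic_load(ctr);

	if (seen > 0)
		atomic_compare_exchange_strong(ctr, &seen, 0); /* no-op if raced */
}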
- */ +void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy) +{ + mt7915_l2_set(dev, MT_WF_PHY_RXTD12(ext_phy), + MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY | + MT_WF_PHY_RXTD12_IRPI_SW_CLR); + + mt7915_l2_set(dev, MT_WF_PHY_RX_CTRL1(ext_phy), + FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5)); +} + +static u8 +mt7915_phy_get_nf(struct mt7915_phy *phy, int idx) +{ + static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 }; + struct mt7915_dev *dev = phy->dev; + u32 val, sum = 0, n = 0; + int nss, i; + + for (nss = 0; nss < hweight8(phy->chainmask); nss++) { + u32 reg = MT_WF_IRPI(nss + (idx << dev->dbdc_support)); + + for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) { + val = mt7915_l2_rr(dev, reg); + sum += val * nf_power[i]; + n += val; + } + } + + if (!n) + return 0; + + return sum / n; +} + static void mt7915_phy_update_channel(struct mt76_phy *mphy, int idx) { struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76); + struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv; struct mt76_channel_state *state; u64 busy_time, tx_time, rx_time, obss_time; + int nf; busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx), MT_MIB_SDR9_BUSY_MASK); @@ -1102,12 +1387,18 @@ mt7915_phy_update_channel(struct mt76_phy *mphy, int idx) obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx), MT_MIB_OBSSTIME_MASK); - /* TODO: state->noise */ + nf = mt7915_phy_get_nf(phy, idx); + if (!phy->noise) + phy->noise = nf << 4; + else if (nf) + phy->noise += nf - (phy->noise >> 4); + state = mphy->chan_state; state->cc_busy += busy_time; state->cc_tx += tx_time; state->cc_rx += rx_time + obss_time; state->cc_bss_rx += rx_time; + state->noise = -(phy->noise >> 4); } void mt7915_update_channel(struct mt76_dev *mdev) @@ -1162,8 +1453,10 @@ mt7915_update_beacons(struct mt7915_dev *dev) } static void -mt7915_dma_reset(struct mt7915_dev *dev) +mt7915_dma_reset(struct mt7915_phy *phy) { + struct mt7915_dev *dev = phy->dev; + struct mt76_phy *mphy_ext = dev->mt76.phy2; int i; mt76_clear(dev, MT_WFDMA0_GLO_CFG, @@ -1172,8 +1465,12 @@ mt7915_dma_reset(struct mt7915_dev *dev) MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN); usleep_range(1000, 2000); - for (i = 0; i < __MT_TXQ_MAX; i++) - mt76_queue_tx_cleanup(dev, i, true); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], true); + for (i = 0; i < __MT_TXQ_MAX; i++) { + mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); + if (mphy_ext) + mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true); + } mt76_for_each_q_rx(&dev->mt76, i) { mt76_queue_rx_reset(dev, i); @@ -1229,7 +1526,7 @@ void mt7915_mac_reset_work(struct work_struct *work) mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { - mt7915_dma_reset(dev); + mt7915_dma_reset(&dev->phy); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE); @@ -1329,7 +1626,7 @@ mt7915_mac_sta_stats_work(struct mt7915_phy *phy) spin_unlock_bh(&dev->sta_poll_lock); /* use MT_TX_FREE_RATE to report Tx rate for further devices */ - mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx); + mt7915_mcu_get_tx_rate(dev, RATE_CTRL_RU_INFO, msta->wcid.idx); spin_lock_bh(&dev->sta_poll_lock); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index c8bb5ea96c60..d420392b952d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ 
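/*
 * [Standalone model, not part of the patch] The new noise tracking keeps
 * phy->noise in Q4 fixed point (sixteenths of a dB):
 * mt7915_phy_get_nf() returns the IPI-histogram-weighted mean |NF| in
 * dB, and "noise += nf - (noise >> 4)" is an exponential moving average
 * with alpha = 1/16, reported to mac80211 as -(noise >> 4) dBm.
 */
#include <stdint.h>

static int32_t noise_q4;			/* 0 = not yet seeded */

static int update_noise(uint32_t nf)		/* nf: positive dB figure */
{
	if (!noise_q4)
		noise_q4 = nf << 4;		/* first sample seeds the average */
	else if (nf)
		noise_q4 += nf - (noise_q4 >> 4);
	return -(noise_q4 >> 4);		/* e.g. -92 dBm */
}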
-128,6 +128,11 @@ enum rx_pkt_type { #define MT_CRXV_HE_BEAM_CHNG BIT(13) #define MT_CRXV_HE_DOPPLER BIT(16) +#define MT_CRXV_SNR GENMASK(18, 13) +#define MT_CRXV_FOE_LO GENMASK(31, 19) +#define MT_CRXV_FOE_HI GENMASK(6, 0) +#define MT_CRXV_FOE_SHIFT 13 + enum tx_header_format { MT_HDR_FORMAT_802_3, MT_HDR_FORMAT_CMD, @@ -160,6 +165,7 @@ enum tx_mcu_port_q_idx { #define MT_CT_INFO_MGMT_FRAME BIT(2) #define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3) #define MT_CT_INFO_HSR2_TX BIT(4) +#define MT_CT_INFO_FROM_HOST BIT(7) #define MT_TXD_SIZE (8 * 4) @@ -176,6 +182,7 @@ enum tx_mcu_port_q_idx { #define MT_TXD1_HDR_PAD GENMASK(19, 18) #define MT_TXD1_HDR_FORMAT GENMASK(17, 16) #define MT_TXD1_HDR_INFO GENMASK(15, 11) +#define MT_TXD1_ETH_802_3 BIT(15) #define MT_TXD1_VTA BIT(10) #define MT_TXD1_WLAN_IDX GENMASK(9, 0) @@ -229,7 +236,7 @@ enum tx_mcu_port_q_idx { #define MT_TXD6_ANT_ID GENMASK(7, 4) #define MT_TXD6_DYN_BW BIT(3) #define MT_TXD6_FIXED_BW BIT(2) -#define MT_TXD6_BW GENMASK(2, 0) +#define MT_TXD6_BW GENMASK(1, 0) #define MT_TXD7_TXD_LEN GENMASK(31, 30) #define MT_TXD7_UDP_TCP_SUM BIT(29) @@ -246,7 +253,9 @@ enum tx_mcu_port_q_idx { #define MT_TX_RATE_STBC BIT(13) #define MT_TX_RATE_NSS GENMASK(12, 10) #define MT_TX_RATE_MODE GENMASK(9, 6) -#define MT_TX_RATE_IDX GENMASK(5, 0) +#define MT_TX_RATE_SU_EXT_TONE BIT(5) +#define MT_TX_RATE_DCM BIT(4) +#define MT_TX_RATE_IDX GENMASK(3, 0) #define MT_TXP_MAX_BUF_NUM 6 @@ -254,8 +263,7 @@ struct mt7915_txp { __le16 flags; __le16 token; u8 bss_idx; - u8 rept_wds_wcid; - u8 rsv; + __le16 rept_wds_wcid; u8 nbuf; __le32 buf[MT_TXP_MAX_BUF_NUM]; __le16 len[MT_TXP_MAX_BUF_NUM]; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index c48158392057..0c82aa2ef219 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -34,21 +34,24 @@ static int mt7915_start(struct ieee80211_hw *hw) mt7915_mcu_set_pm(dev, 0, 0); mt7915_mcu_set_mac(dev, 0, true, false); mt7915_mcu_set_scs(dev, 0, true); + mt7915_mac_enable_nf(dev, 0); } if (phy != &dev->phy) { mt7915_mcu_set_pm(dev, 1, 0); mt7915_mcu_set_mac(dev, 1, true, false); mt7915_mcu_set_scs(dev, 1, true); + mt7915_mac_enable_nf(dev, 1); } - mt7915_mcu_set_sku_en(phy, true); + mt7915_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76)); mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); set_bit(MT76_STATE_RUNNING, &phy->mt76->state); - ieee80211_queue_delayed_work(hw, &phy->mac_work, - MT7915_WATCHDOG_TIME); + if (!mt76_testmode_enabled(&dev->mt76)) + ieee80211_queue_delayed_work(hw, &phy->mac_work, + MT7915_WATCHDOG_TIME); if (!running) mt7915_mac_reset_counters(phy); @@ -67,6 +70,8 @@ static void mt7915_stop(struct ieee80211_hw *hw) mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); if (phy != &dev->phy) { @@ -82,28 +87,51 @@ static void mt7915_stop(struct ieee80211_hw *hw) mutex_unlock(&dev->mt76.mutex); } -static int get_omac_idx(enum nl80211_iftype type, u32 mask) +static inline int get_free_idx(u32 mask, u8 start, u8 end) +{ + return ffs(~mask & GENMASK(end, start)); +} + +static int get_omac_idx(enum nl80211_iftype type, u64 mask) { int i; switch (type) { + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_STATION: + /* prefer hw bssid slot 1-3 */ + i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3); + if (i) + return i - 1; + + if (type != NL80211_IFTYPE_STATION) + break; + + /* 
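/*
 * [Illustrative sketch, not part of the patch] get_free_idx() above
 * returns the 1-based position of the lowest clear bit of mask inside
 * [start, end], or 0 when the window is full -- ffs() keeps "not found"
 * distinct from slot 0.  E.g. mask = 0b0110, start = 1, end = 3:
 * ~mask & GENMASK(3, 1) = 0b1000, ffs() = 4, so the caller's i - 1
 * yields omac index 3.  Plain-C expansion (valid for end <= 31):
 */
#include <stdint.h>

static int get_free_idx_model(uint32_t mask, unsigned int start,
			      unsigned int end)
{
	uint32_t window = (~0u >> (31 - end)) & ~((1u << start) - 1);

	return __builtin_ffs((int)(~mask & window));
}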
next, try to find a free repeater entry for the sta */ + i = get_free_idx(mask >> REPEATER_BSSID_START, 0, + REPEATER_BSSID_MAX - REPEATER_BSSID_START); + if (i) + return i + 32 - 1; + + i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX); + if (i) + return i - 1; + + if (~mask & BIT(HW_BSSID_0)) + return HW_BSSID_0; + + break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: /* ap uses hw bssid 0 and ext bssid */ if (~mask & BIT(HW_BSSID_0)) return HW_BSSID_0; - for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++) - if (~mask & BIT(i)) - return i; - break; - case NL80211_IFTYPE_MESH_POINT: - case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_STATION: - /* station uses hw bssid other than 0 */ - for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++) - if (~mask & BIT(i)) - return i; + i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX); + if (i) + return i - 1; + break; default: WARN_ON(1); @@ -125,6 +153,12 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + + if (vif->type == NL80211_IFTYPE_MONITOR && + is_zero_ether_addr(vif->addr)) + phy->monitor_vif = vif; + mvif->idx = ffs(~phy->mt76->vif_mask) - 1; if (mvif->idx >= MT7915_MAX_INTERFACES) { ret = -ENOSPC; @@ -146,12 +180,12 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, else mvif->wmm_idx = mvif->idx % MT7915_MAX_WMM_SETS; - ret = mt7915_mcu_add_dev_info(dev, vif, true); + ret = mt7915_mcu_add_dev_info(phy, vif, true); if (ret) goto out; phy->mt76->vif_mask |= BIT(mvif->idx); - phy->omac_mask |= BIT(mvif->omac_idx); + phy->omac_mask |= BIT_ULL(mvif->omac_idx); idx = MT7915_WTBL_RESERVED - mvif->idx; @@ -171,6 +205,11 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, mtxq->wcid = &mvif->sta.wcid; } + if (vif->type != NL80211_IFTYPE_AP && + (!mvif->omac_idx || mvif->omac_idx > 3)) + vif->offload_flags = 0; + vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR; + out: mutex_unlock(&dev->mt76.mutex); @@ -188,13 +227,20 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, /* TODO: disable beacon for the bss */ - mt7915_mcu_add_dev_info(dev, vif, false); + mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + mutex_unlock(&dev->mt76.mutex); + + if (vif == phy->monitor_vif) + phy->monitor_vif = NULL; + + mt7915_mcu_add_dev_info(phy, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); mutex_lock(&dev->mt76.mutex); phy->mt76->vif_mask &= ~BIT(mvif->idx); - phy->omac_mask &= ~BIT(mvif->omac_idx); + phy->omac_mask &= ~BIT_ULL(mvif->omac_idx); mutex_unlock(&dev->mt76.mutex); spin_lock_bh(&dev->sta_poll_lock); @@ -222,7 +268,7 @@ static void mt7915_init_dfs_state(struct mt7915_phy *phy) phy->dfs_state = -1; } -static int mt7915_set_channel(struct mt7915_phy *phy) +int mt7915_set_channel(struct mt7915_phy *phy) { struct mt7915_dev *dev = phy->dev; int ret; @@ -251,8 +297,10 @@ out: mutex_unlock(&dev->mt76.mutex); mt76_txq_schedule_all(phy->mt76); - ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, - MT7915_WATCHDOG_TIME); + + if (!mt76_testmode_enabled(&dev->mt76)) + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, + MT7915_WATCHDOG_TIME); return ret; } @@ -283,8 +331,6 @@ static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_AES_CMAC: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE; break; - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: @@ -292,6 +338,8 @@ 
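/*
 * [Condensed model, not part of the patch; slot-range constants whose
 * values the patch does not show are elided] Allocation order for
 * station-type interfaces with the new 64-bit omac mask (repeater
 * entries live at bits 32 and up, which is why the mask moves to
 * BIT_ULL() and mt7915_mcu_muar_config() shifts it right by 32):
 */
#include <stdint.h>

static int sta_omac_idx(uint64_t mask)
{
	int i;

	i = get_free_idx_model((uint32_t)mask, 1, 3);	/* 1. HW_BSSID_1..3 */
	if (i)
		return i - 1;
	/* 2. repeater window; real bound is REPEATER_BSSID_MAX - 32 */
	i = get_free_idx_model((uint32_t)(mask >> 32), 0, 31);
	if (i)
		return 32 + i - 1;
	/* 3. extended BSSIDs: EXT_BSSID_1..EXT_BSSID_MAX, same pattern */
	/* 4. HW_BSSID_0 as the last resort */
	return !(mask & 1) ? 0 : -1;
}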
static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_GCMP_256: case WLAN_CIPHER_SUITE_SMS4: break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: default: return -EOPNOTSUPP; } @@ -316,6 +364,13 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) int ret; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { +#ifdef CONFIG_NL80211_TESTMODE + if (dev->mt76.test.state != MT76_TM_STATE_OFF) { + mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, false); + mutex_unlock(&dev->mt76.mutex); + } +#endif ieee80211_stop_queues(hw); ret = mt7915_set_channel(phy); if (ret) @@ -332,11 +387,16 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { - if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR); + + if (!enabled) phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; else phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN, + enabled); + mt76_testmode_reset(&dev->mt76, true); mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); } @@ -764,9 +824,13 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { + struct mt7915_phy *phy = mt7915_hw_phy(hw); struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; struct mt7915_sta_stats *stats = &msta->stats; + if (mt7915_mcu_get_rx_rate(phy, vif, sta, &sinfo->rxrate) == 0) + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + if (!stats->tx_rate.legacy && !stats->tx_rate.flags) return; @@ -802,6 +866,22 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw, ieee80211_queue_work(hw, &dev->rc_work); } +static void mt7915_sta_set_4addr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool enabled) +{ + struct mt7915_dev *dev = mt7915_hw_dev(hw); + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + + if (enabled) + set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags); + else + clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags); + + mt7915_mcu_sta_update_hdr_trans(dev, vif, sta); +} + const struct ieee80211_ops mt7915_ops = { .tx = mt7915_tx, .start = mt7915_start, @@ -833,6 +913,9 @@ const struct ieee80211_ops mt7915_ops = { .set_antenna = mt7915_set_antenna, .set_coverage_class = mt7915_set_coverage_class, .sta_statistics = mt7915_sta_statistics, + .sta_set_4addr = mt7915_sta_set_4addr, + CFG80211_TESTMODE_CMD(mt76_testmode_cmd) + CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = mt7915_sta_add_debugfs, #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index a3ccc1785661..5fdd1a6d32ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -215,32 +215,71 @@ mt7915_mcu_get_sta_nss(u16 mcs_map) return nss - 1; } -static int __mt7915_mcu_msg_send(struct mt7915_dev *dev, struct sk_buff *skb, - int cmd, int *wait_seq) +static int +mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) { + struct mt7915_mcu_rxd *rxd; + int ret = 0; + + if (!skb) { + dev_err(mdev->dev, "Message %d (seq %d) timeout\n", + cmd, seq); + return -ETIMEDOUT; + } + + rxd = (struct mt7915_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -EAGAIN; + + switch (cmd) { + case -MCU_CMD_PATCH_SEM_CONTROL: + skb_pull(skb, 
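/*
 * [Illustrative sketch, not part of the patch] mt76_rmw_field() in the
 * monitor-mode path above is a read-modify-write of one register field:
 * group-5 RXD (rx rate reports) stays off by default because of the
 * hardware issue noted in mt7915_mac_init_band(), and is switched on
 * only while a monitor interface is active.  The helper's arithmetic:
 */
#include <stdint.h>

static uint32_t rmw_field(uint32_t old, uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);	/* field's LSB */

	return (old & ~mask) | ((val << shift) & mask);
}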
sizeof(*rxd) - 4); + ret = *skb->data; + break; + case MCU_EXT_CMD_THERMAL_CTRL: + skb_pull(skb, sizeof(*rxd) + 4); + ret = le32_to_cpu(*(__le32 *)skb->data); + break; + default: + skb_pull(skb, sizeof(struct mt7915_mcu_rxd)); + break; + } + + return ret; +} + +static int +mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); struct mt7915_mcu_txd *mcu_txd; u8 seq, pkt_fmt, qidx; enum mt76_txq_id txq; __le32 *txd; u32 val; + /* TODO: make dynamic based on msg type */ + mdev->mcu.timeout = 20 * HZ; + seq = ++dev->mt76.mcu.msg_seq & 0xf; if (!seq) seq = ++dev->mt76.mcu.msg_seq & 0xf; if (cmd == -MCU_CMD_FW_SCATTER) { - txq = MT_TXQ_FWDL; + txq = MT_MCUQ_FWDL; goto exit; } mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd)); if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) { - txq = MT_TXQ_MCU_WA; + txq = MT_MCUQ_WA; qidx = MT_TX_MCU_PORT_RX_Q0; pkt_fmt = MT_TX_TYPE_CMD; } else { - txq = MT_TXQ_MCU; + txq = MT_MCUQ_WM; qidx = MT_TX_MCU_PORT_RX_Q0; pkt_fmt = MT_TX_TYPE_CMD; } @@ -276,7 +315,10 @@ static int __mt7915_mcu_msg_send(struct mt7915_dev *dev, struct sk_buff *skb, mcu_txd->set_query = MCU_Q_SET; } - mcu_txd->s2d_index = MCU_S2D_H2N; + if (cmd == MCU_EXT_CMD_MWDS_SUPPORT) + mcu_txd->s2d_index = MCU_S2D_H2C; + else + mcu_txd->s2d_index = MCU_S2D_H2N; WARN_ON(cmd == MCU_EXT_CMD_EFUSE_ACCESS && mcu_txd->set_query != MCU_Q_QUERY); @@ -284,116 +326,7 @@ exit: if (wait_seq) *wait_seq = seq; - return mt76_tx_queue_skb_raw(dev, txq, skb, 0); -} - -static int -mt7915_mcu_parse_eeprom(struct mt7915_dev *dev, struct sk_buff *skb) -{ - struct mt7915_mcu_eeprom_info *res; - u8 *buf; - - if (!skb) - return -EINVAL; - - skb_pull(skb, sizeof(struct mt7915_mcu_rxd)); - - res = (struct mt7915_mcu_eeprom_info *)skb->data; - buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); - memcpy(buf, res->data, 16); - - return 0; -} - -static int -mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd, - struct sk_buff *skb, int seq) -{ - struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; - int ret = 0; - - if (seq != rxd->seq) { - ret = -EAGAIN; - goto out; - } - - switch (cmd) { - case -MCU_CMD_PATCH_SEM_CONTROL: - skb_pull(skb, sizeof(*rxd) - 4); - ret = *skb->data; - break; - case MCU_EXT_CMD_THERMAL_CTRL: - skb_pull(skb, sizeof(*rxd) + 4); - ret = le32_to_cpu(*(__le32 *)skb->data); - break; - case MCU_EXT_CMD_EFUSE_ACCESS: - ret = mt7915_mcu_parse_eeprom(dev, skb); - break; - default: - break; - } -out: - dev_kfree_skb(skb); - - return ret; -} - -static int -mt7915_mcu_wait_response(struct mt7915_dev *dev, int cmd, int seq) -{ - unsigned long expires = jiffies + 20 * HZ; - struct sk_buff *skb; - int ret = 0; - - while (true) { - skb = mt76_mcu_get_response(&dev->mt76, expires); - if (!skb) { - dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n", - cmd, seq); - return -ETIMEDOUT; - } - - ret = mt7915_mcu_parse_response(dev, cmd, skb, seq); - if (ret != -EAGAIN) - break; - } - - return ret; -} - -static int -mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, bool wait_resp) -{ - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); - int ret, seq; - - mutex_lock(&mdev->mcu.mutex); - - ret = __mt7915_mcu_msg_send(dev, skb, cmd, &seq); - if (ret) - goto out; - - if (wait_resp) - ret = mt7915_mcu_wait_response(dev, cmd, seq); - -out: - mutex_unlock(&mdev->mcu.mutex); - - return ret; -} - -static int 
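/*
 * [Illustrative sketch, not part of the patch] MCU responses are matched
 * to requests by a 4-bit sequence number that is never 0 (presumably so
 * a matched response can be told apart from rxd->seq == 0 traffic); on a
 * mismatch mt7915_mcu_parse_response() returns -EAGAIN and the caller
 * keeps waiting.  The generator:
 */
#include <stdint.h>

static uint8_t mcu_next_seq(uint8_t *counter)
{
	uint8_t seq = ++(*counter) & 0xf;

	if (!seq)			/* skip 0 after the wrap-around */
		seq = ++(*counter) & 0xf;
	return seq;
}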
-mt7915_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, - int len, bool wait_resp) -{ - struct sk_buff *skb; - - skb = mt76_mcu_msg_alloc(mdev, data, len); - if (!skb) - return -ENOMEM; - - return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp); + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); } static void @@ -419,8 +352,8 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) } static void -mt7915_mcu_tx_rate_cal(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, - struct rate_info *rate, u16 r) +mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra, + struct rate_info *rate, u16 r) { struct ieee80211_supported_band *sband; u16 ru_idx = le16_to_cpu(ra->ru_idx); @@ -532,11 +465,11 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) mphy = dev->mt76.phy2; /* current rate */ - mt7915_mcu_tx_rate_cal(mphy, ra, &rate, curr); + mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr); stats->tx_rate = rate; /* probing rate */ - mt7915_mcu_tx_rate_cal(mphy, ra, &prob_rate, probe); + mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe); stats->prob_rate = prob_rate; if (attempts) { @@ -1005,18 +938,42 @@ mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy) } } -static void -mt7915_mcu_bss_sync_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +static int +mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif, + bool bssid, bool enable) { - struct bss_info_sync_mode *sync; - struct tlv *tlv; + struct mt7915_dev *dev = phy->dev; + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + u32 idx = mvif->omac_idx - REPEATER_BSSID_START; + u32 mask = phy->omac_mask >> 32 & ~BIT(idx); + const u8 *addr = vif->addr; + struct { + u8 mode; + u8 force_clear; + u8 clear_bitmap[8]; + u8 entry_count; + u8 write; + u8 band; - tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_SYNC_MODE, sizeof(*sync)); + u8 index; + u8 bssid; + u8 addr[ETH_ALEN]; + } __packed req = { + .mode = !!mask || enable, + .entry_count = 1, + .write = 1, + .band = phy != &dev->phy, + .index = idx * 2 + bssid, + }; - sync = (struct bss_info_sync_mode *)tlv; - sync->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); - sync->dtim_period = vif->bss_conf.dtim_period; - sync->enable = true; + if (bssid) + addr = vif->bss_conf.bssid; + + if (enable) + ether_addr_copy(req.addr, addr); + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MUAR_UPDATE, &req, + sizeof(req), true); } int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, @@ -1025,6 +982,9 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct sk_buff *skb; + if (mvif->omac_idx >= REPEATER_BSSID_START) + mt7915_mcu_muar_config(phy, vif, true, enable); + skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL, MT7915_BSS_UPDATE_MAX_SIZE); if (IS_ERR(skb)) @@ -1045,14 +1005,13 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, if (vif->bss_conf.he_support) mt7915_mcu_bss_he_tlv(skb, vif, phy); - if (mvif->omac_idx > HW_BSSID_MAX) + if (mvif->omac_idx >= EXT_BSSID_START && + mvif->omac_idx < REPEATER_BSSID_START) mt7915_mcu_bss_ext_tlv(skb, mvif); - else - mt7915_mcu_bss_sync_tlv(skb, vif); } - return __mt76_mcu_skb_send_msg(&phy->dev->mt76, skb, - MCU_EXT_CMD_BSS_INFO_UPDATE, true); + return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb, + MCU_EXT_CMD_BSS_INFO_UPDATE, true); } /** starec & wtbl **/ @@ -1133,8 +1092,8 @@ int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif, if 
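/*
 * [Illustrative sketch, not part of the patch] Each repeater entry in
 * mt7915_mcu_muar_config() above occupies two MUAR slots -- one for the
 * interface address, one for the BSSID -- hence index = idx * 2 + bssid;
 * REPEATER_BSSID_START = 32 is implied by the "omac_mask >> 32".
 */
static unsigned int muar_index(unsigned int omac_idx, int is_bssid)
{
	unsigned int idx = omac_idx - 32;	/* REPEATER_BSSID_START */

	return idx * 2 + !!is_bssid;
}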
(ret) return ret; - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } static void @@ -1213,8 +1172,8 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, &skb); mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); - ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); if (ret) return ret; @@ -1225,8 +1184,8 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, mt7915_mcu_sta_ba_tlv(skb, params, enable, tx); - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, @@ -1508,9 +1467,7 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) muru = (struct sta_rec_muru *)tlv; muru->cfg.ofdma_dl_en = true; - muru->cfg.ofdma_ul_en = true; muru->cfg.mimo_dl_en = true; - muru->cfg.mimo_ul_en = true; muru->ofdma_dl.punc_pream_rx = HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]); @@ -1563,8 +1520,8 @@ mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif, /* starec muru */ mt7915_mcu_sta_muru_tlv(skb, sta); - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } static void @@ -1662,7 +1619,7 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht), wtbl_tlv, sta_wtbl); ht = (struct wtbl_ht *)tlv; - ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING; + ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING); ht->af = sta->ht_cap.ampdu_factor; ht->mm = sta->ht_cap.ampdu_density; ht->ht = true; @@ -1676,7 +1633,7 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht), wtbl_tlv, sta_wtbl); vht = (struct wtbl_vht *)tlv; - vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC, + vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); vht->vht = true; af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, @@ -1688,6 +1645,53 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv); } +static void +mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + void *sta_wtbl, void *wtbl_tlv) +{ + struct mt7915_sta *msta; + struct wtbl_hdr_trans *htr = NULL; + struct tlv *tlv; + + tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, sizeof(*htr), + wtbl_tlv, sta_wtbl); + htr = (struct wtbl_hdr_trans *)tlv; + htr->no_rx_trans = true; + if (vif->type == NL80211_IFTYPE_STATION) + htr->to_ds = true; + else + htr->from_ds = true; + + if (!sta) + return; + + msta = (struct mt7915_sta *)sta->drv_priv; + if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) { + htr->to_ds = true; + htr->from_ds = true; + } +} + +int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, MT7915_WTBL_UPDATE_MAX_SIZE); + if (!skb) + return -ENOMEM; + + wtbl_hdr = 
mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb); + mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_WTBL_UPDATE, + true); +} + int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { @@ -1708,8 +1712,8 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, &skb); mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } static void @@ -2010,8 +2014,8 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable); - r = __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + r = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); if (r) return r; } @@ -2026,8 +2030,8 @@ mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif, mt7915_mcu_sta_bfee_tlv(skb, sta, phy); - r = __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + r = mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); if (r) return r; } @@ -2194,8 +2198,8 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } static int @@ -2217,11 +2221,11 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, .action = cpu_to_le32(MT_STA_BSS_GROUP), .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), - .val = cpu_to_le32(mvif->idx), + .val = cpu_to_le32(mvif->idx % 16), }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL, &req, + sizeof(req), true); } int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, @@ -2277,12 +2281,13 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, sta_wtbl, &skb); if (enable) { mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); + mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); if (sta) mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr); } - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, @@ -2327,13 +2332,14 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, ra->phy.sgi = ra->phy.mcs * 15; out: - return __mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_EXT_CMD_STA_REC_UPDATE, true); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); } -int mt7915_mcu_add_dev_info(struct mt7915_dev *dev, +int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, bool enable) { + struct mt7915_dev *dev = phy->dev; struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; struct { struct req_hdr { @@ -2365,9 +2371,12 @@ int mt7915_mcu_add_dev_info(struct mt7915_dev *dev, }, }; + if (mvif->omac_idx >= REPEATER_BSSID_START) + return mt7915_mcu_muar_config(phy, vif, false, enable); + memcpy(data.tlv.omac_addr, vif->addr, 
ETH_ALEN); - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE, - &data, sizeof(data), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_DEV_INFO_UPDATE, + &data, sizeof(data), true); } static void @@ -2458,30 +2467,8 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs); dev_kfree_skb(skb); - return __mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, - MCU_EXT_CMD_BSS_INFO_UPDATE, true); -} - -static int mt7915_mcu_send_firmware(struct mt7915_dev *dev, const void *data, - int len) -{ - int ret = 0, cur_len; - - while (len > 0) { - cur_len = min_t(int, 4096 - sizeof(struct mt7915_mcu_txd), - len); - - ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER, - data, cur_len, false); - if (ret) - break; - - data += cur_len; - len -= cur_len; - mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false); - } - - return ret; + return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, + MCU_EXT_CMD_BSS_INFO_UPDATE, true); } static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr, @@ -2495,8 +2482,8 @@ static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr, .addr = cpu_to_le32(addr), }; - return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ, &req, + sizeof(req), true); } static int mt7915_mcu_restart(struct mt76_dev *dev) @@ -2508,8 +2495,8 @@ static int mt7915_mcu_restart(struct mt76_dev *dev) .power_mode = 1, }; - return __mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req, - sizeof(req), false); + return mt76_mcu_send_msg(dev, -MCU_CMD_NIC_POWER_CTRL, &req, + sizeof(req), false); } static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get) @@ -2520,8 +2507,8 @@ static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get) .op = cpu_to_le32(get ? 
PATCH_SEM_GET : PATCH_SEM_RELEASE), }; - return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_SEM_CONTROL, &req, + sizeof(req), true); } static int mt7915_mcu_start_patch(struct mt7915_dev *dev) @@ -2533,8 +2520,8 @@ static int mt7915_mcu_start_patch(struct mt7915_dev *dev) .check_crc = 0, }; - return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_PATCH_FINISH_REQ, &req, + sizeof(req), true); } static int mt7915_driver_own(struct mt7915_dev *dev) @@ -2570,7 +2557,7 @@ static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr, else attr = -MCU_CMD_TARGET_ADDRESS_LEN_REQ; - return __mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true); } static int mt7915_load_patch(struct mt7915_dev *dev) @@ -2629,7 +2616,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev) goto out; } - ret = mt7915_mcu_send_firmware(dev, dl, len); + ret = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER, + dl, len); if (ret) { dev_err(dev->mt76.dev, "Failed to send patch\n"); goto out; @@ -2697,7 +2685,8 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev, return err; } - err = mt7915_mcu_send_firmware(dev, data + offset, len); + err = mt76_mcu_send_firmware(&dev->mt76, -MCU_CMD_FW_SCATTER, + data + offset, len); if (err) { dev_err(dev->mt76.dev, "Failed to send firmware.\n"); return err; @@ -2810,7 +2799,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev) return -EIO; } - mt76_queue_tx_cleanup(dev, MT_TXQ_FWDL, false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); dev_dbg(dev->mt76.dev, "Firmware init done\n"); @@ -2826,8 +2815,8 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl) .ctrl_val = ctrl }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, - &data, sizeof(data), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_LOG_2_HOST, &data, + sizeof(data), true); } int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) @@ -2835,7 +2824,7 @@ int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) struct { u8 ver; u8 pad; - u16 len; + __le16 len; u8 level; u8 rsv[3]; __le32 module_idx; @@ -2844,8 +2833,21 @@ int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) .level = level, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL, - &data, sizeof(data), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_FW_DBG_CTRL, &data, + sizeof(data), false); +} + +static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled) +{ + struct { + u8 enable; + u8 _rsv[3]; + } __packed req = { + .enable = enabled + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MWDS_SUPPORT, &req, + sizeof(req), false); } int mt7915_mcu_init(struct mt7915_dev *dev) @@ -2853,12 +2855,12 @@ int mt7915_mcu_init(struct mt7915_dev *dev) static const struct mt76_mcu_ops mt7915_mcu_ops = { .headroom = sizeof(struct mt7915_mcu_txd), .mcu_skb_send_msg = mt7915_mcu_send_message, - .mcu_send_msg = mt7915_mcu_msg_send, + .mcu_parse_response = mt7915_mcu_parse_response, .mcu_restart = mt7915_mcu_restart, }; int ret; - dev->mt76.mcu_ops = &mt7915_mcu_ops, + dev->mt76.mcu_ops = &mt7915_mcu_ops; ret = mt7915_driver_own(dev); if (ret) @@ -2870,6 +2872,7 @@ int mt7915_mcu_init(struct mt7915_dev *dev) set_bit(MT76_STATE_MCU_RUNNING, 
&dev->mphy.state); mt7915_mcu_fw_log_2_host(dev, 0); + mt7915_mcu_set_mwds(dev, 1); return 0; } @@ -2916,13 +2919,13 @@ int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, }; int ret; - ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, - &req_trans, sizeof(req_trans), false); + ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RX_HDR_TRANS, + &req_trans, sizeof(req_trans), false); if (ret) return ret; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL, - &req_mac, sizeof(req_mac), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_MAC_INIT_CTRL, + &req_mac, sizeof(req_mac), true); } int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable) @@ -2937,8 +2940,8 @@ int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable) .enable = enable + 1, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SCS_CTRL, &req, + sizeof(req), false); } int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val) @@ -2957,8 +2960,8 @@ int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val) .pkt_thresh = cpu_to_le32(0x2), }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PROTECT_CTRL, &req, + sizeof(req), true); } int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) @@ -3011,8 +3014,8 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) else e->cw_max = cpu_to_le16(10); } - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, + sizeof(req), true); } int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter) @@ -3042,8 +3045,8 @@ int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter) .band_idx = band, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_PM_STATE_CTRL, &req, + sizeof(req), true); } int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, @@ -3063,57 +3066,106 @@ int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, .val = val, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_CTRL, &req, + sizeof(req), true); } int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val) { struct { - u32 tag; - u16 min_lpn; + __le32 tag; + __le16 min_lpn; u8 rsv[2]; } __packed req = { - .tag = 0x1, - .min_lpn = val, + .tag = cpu_to_le32(0x1), + .min_lpn = cpu_to_le16(val), }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, + sizeof(req), true); } int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, const struct mt7915_dfs_pulse *pulse) { struct { - u32 tag; - struct mt7915_dfs_pulse pulse; + __le32 tag; + + __le32 max_width; /* us */ + __le32 max_pwr; /* dbm */ + __le32 min_pwr; /* dbm */ + __le32 min_stgr_pri; /* us */ + __le32 max_stgr_pri; /* us */ + __le32 min_cr_pri; /* us */ + __le32 max_cr_pri; /* us */ } __packed req = { - .tag = 0x3, + .tag = cpu_to_le32(0x3), + +#define __req_field(field) .field = cpu_to_le32(pulse->field) + __req_field(max_width), + __req_field(max_pwr), + __req_field(min_pwr), + __req_field(min_stgr_pri), + __req_field(max_stgr_pri), + __req_field(min_cr_pri), + 
__req_field(max_cr_pri), +#undef __req_field }; - memcpy(&req.pulse, pulse, sizeof(*pulse)); - - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, + sizeof(req), true); } int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, const struct mt7915_dfs_pattern *pattern) { struct { - u32 tag; - u16 radar_type; - struct mt7915_dfs_pattern pattern; + __le32 tag; + __le16 radar_type; + + u8 enb; + u8 stgr; + u8 min_crpn; + u8 max_crpn; + u8 min_crpr; + u8 min_pw; + u32 min_pri; + u32 max_pri; + u8 max_pw; + u8 min_crbn; + u8 max_crbn; + u8 min_stgpn; + u8 max_stgpn; + u8 min_stgpr; + u8 rsv[2]; + u32 min_stgpr_diff; } __packed req = { - .tag = 0x2, - .radar_type = index, + .tag = cpu_to_le32(0x2), + .radar_type = cpu_to_le16(index), + +#define __req_field_u8(field) .field = pattern->field +#define __req_field_u32(field) .field = cpu_to_le32(pattern->field) + __req_field_u8(enb), + __req_field_u8(stgr), + __req_field_u8(min_crpn), + __req_field_u8(max_crpn), + __req_field_u8(min_crpr), + __req_field_u8(min_pw), + __req_field_u32(min_pri), + __req_field_u32(max_pri), + __req_field_u8(max_pw), + __req_field_u8(min_crbn), + __req_field_u8(max_crbn), + __req_field_u8(min_stgpn), + __req_field_u8(max_stgpn), + __req_field_u8(min_stgpr), + __req_field_u32(min_stgpr_diff), +#undef __req_field_u8 +#undef __req_field_u32 }; - memcpy(&req.pattern, pattern, sizeof(*pattern)); - - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_RDD_TH, &req, + sizeof(req), true); } int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) @@ -3143,11 +3195,20 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) .center_ch = ieee80211_frequency_to_channel(freq1), .bw = mt7915_mcu_chan_bw(chandef), .tx_streams_num = hweight8(phy->mt76->antenna_mask), - .rx_streams = phy->chainmask, + .rx_streams = phy->mt76->antenna_mask, .band_idx = phy != &dev->phy, .channel_band = chandef->chan->band, }; +#ifdef CONFIG_NL80211_TESTMODE + if (dev->mt76.test.tx_antenna_mask && + (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES || + dev->mt76.test.state == MT76_TM_STATE_RX_FRAMES)) { + req.tx_streams_num = fls(dev->mt76.test.tx_antenna_mask); + req.rx_streams = dev->mt76.test.tx_antenna_mask; + } +#endif + if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && @@ -3165,7 +3226,7 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) req.center_ch2 = ieee80211_frequency_to_channel(freq2); } - return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); } int mt7915_mcu_set_eeprom(struct mt7915_dev *dev) @@ -3179,8 +3240,8 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev) .format = EE_FORMAT_WHOLE, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE, + &req, sizeof(req), true); } int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) @@ -3188,9 +3249,22 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) struct mt7915_mcu_eeprom_info req = { .addr = cpu_to_le32(round_down(offset, 16)), }; + struct mt7915_mcu_eeprom_info *res; + struct sk_buff *skb; + int ret; + u8 *buf; + + ret = 
mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req, + sizeof(req), true, &skb); + if (ret) + return ret; + + res = (struct mt7915_mcu_eeprom_info *)skb->data; + buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); + memcpy(buf, res->data, 16); + dev_kfree_skb(skb); - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_ACCESS, &req, - sizeof(req), true); + return 0; } int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index) @@ -3205,11 +3279,11 @@ int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index) .action = index, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req, - sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_THERMAL_CTRL, &req, + sizeof(req), true); } -int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) +int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) { struct { __le32 cmd; @@ -3223,8 +3297,8 @@ int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx) .dump_group = cpu_to_le16(1), }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req, - sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RATE_CTRL, &req, + sizeof(req), false); } int mt7915_mcu_set_sku(struct mt7915_phy *phy) @@ -3251,9 +3325,31 @@ int mt7915_mcu_set_sku(struct mt7915_phy *phy) for (i = 0; i < MT7915_SKU_RATE_NUM; i++) req.val[i] = hw->conf.power_level * 2 + delta[i]; - return __mt76_mcu_send_msg(&dev->mt76, - MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req, + sizeof(req), true); +} + +int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, + u8 en) +{ + struct { + u8 test_mode_en; + u8 param_idx; + u8 _rsv[2]; + + u8 enable; + u8 _rsv2[3]; + + u8 pad[8]; + } __packed req = { + .test_mode_en = test_mode, + .param_idx = param, + .enable = en, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, + sizeof(req), false); } int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable) @@ -3270,9 +3366,9 @@ int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable) .sku_enable = enable, }; - return __mt76_mcu_send_msg(&dev->mt76, - MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, &req, + sizeof(req), true); } int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band) @@ -3288,8 +3384,8 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band) .band = band, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER, - &req, sizeof(req), false); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SER_TRIGGER, + &req, sizeof(req), false); } int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev) @@ -3306,8 +3402,8 @@ int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev) .ibf = false, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, &req, + sizeof(req), true); } int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev) @@ -3325,8 +3421,8 @@ int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev) .snd_mode = MT_BF_PROCESSING, }; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, - &req, sizeof(req), true); + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_TXBF_ACTION, &req, + sizeof(req), true); } int 
mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, @@ -3342,14 +3438,112 @@ int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif, u8 drop_tx_idx; u8 sta_idx; /* 256 sta */ u8 rsv[2]; - u32 val; + __le32 val; } __packed req = { .action = MT_SPR_ENABLE, .arg_num = 1, .band_idx = mvif->band_idx, - .val = enable, + .val = cpu_to_le32(enable), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR, &req, + sizeof(req), true); +} + +int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct rate_info *rate) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct mt7915_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + struct { + u8 category; + u8 band; + __le16 wcid; + } __packed req = { + .category = MCU_PHY_STATE_CONTENTION_RX_RATE, + .band = mvif->band_idx, + .wcid = cpu_to_le16(msta->wcid.idx), }; + struct ieee80211_supported_band *sband; + struct mt7915_mcu_phy_rx_info *res; + struct sk_buff *skb; + u16 flags = 0; + int ret; + int i; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD_PHY_STAT_INFO, + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + res = (struct mt7915_mcu_phy_rx_info *)skb->data; + + rate->mcs = res->rate; + rate->nss = res->nsts + 1; + + switch (res->mode) { + case MT_PHY_TYPE_CCK: + case MT_PHY_TYPE_OFDM: + if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + for (i = 0; i < sband->n_bitrates; i++) { + if (rate->mcs != (sband->bitrates[i].hw_value & 0xf)) + continue; + + rate->legacy = sband->bitrates[i].bitrate; + break; + } + break; + case MT_PHY_TYPE_HT: + case MT_PHY_TYPE_HT_GF: + if (rate->mcs > 31) + return -EINVAL; - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_SPR, - &req, sizeof(req), true); + flags |= RATE_INFO_FLAGS_MCS; + + if (res->gi) + flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_VHT: + flags |= RATE_INFO_FLAGS_VHT_MCS; + + if (res->gi) + flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case MT_PHY_TYPE_HE_SU: + case MT_PHY_TYPE_HE_EXT_SU: + case MT_PHY_TYPE_HE_TB: + case MT_PHY_TYPE_HE_MU: + rate->he_gi = res->gi; + + flags |= RATE_INFO_FLAGS_HE_MCS; + break; + default: + break; + } + rate->flags = flags; + + switch (res->bw) { + case IEEE80211_STA_RX_BW_160: + rate->bw = RATE_INFO_BW_160; + break; + case IEEE80211_STA_RX_BW_80: + rate->bw = RATE_INFO_BW_80; + break; + case IEEE80211_STA_RX_BW_40: + rate->bw = RATE_INFO_BW_40; + break; + default: + rate->bw = RATE_INFO_BW_20; + break; + } + + dev_kfree_skb(skb); + + return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index c656d66385c4..cd1a4256c843 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -46,6 +46,11 @@ enum { MCU_EXT_EVENT_RATE_REPORT = 0x87, }; +enum { + MCU_ATE_SET_TRX = 0x1, + MCU_ATE_SET_FREQ_OFFSET = 0xa, +}; + struct mt7915_mcu_rxd { __le32 rxd[6]; @@ -153,6 +158,18 @@ struct mt7915_mcu_ra_info { u8 prob_down_pending; } __packed; + +struct mt7915_mcu_phy_rx_info { + u8 category; + u8 rate; + u8 mode; + u8 nsts; + u8 gi; + u8 coding; + u8 stbc; + u8 bw; +}; + #define MT_RA_RATE_NSS GENMASK(8, 6) #define MT_RA_RATE_MCS GENMASK(3, 0) #define MT_RA_RATE_TX_MODE GENMASK(12, 9) @@ -201,19 +218,24 @@ enum { MCU_EXT_CMD_EDCA_UPDATE = 
0x27, MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, MCU_EXT_CMD_THERMAL_CTRL = 0x2c, + MCU_EXT_CMD_WTBL_UPDATE = 0x32, MCU_EXT_CMD_SET_DRR_CTRL = 0x36, MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, + MCU_EXT_CMD_ATE_CTRL = 0x3d, MCU_EXT_CMD_PROTECT_CTRL = 0x3e, MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, MCU_EXT_CMD_RX_HDR_TRANS = 0x47, + MCU_EXT_CMD_MUAR_UPDATE = 0x48, MCU_EXT_CMD_SET_RX_PATH = 0x4e, MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58, + MCU_EXT_CMD_MWDS_SUPPORT = 0x80, MCU_EXT_CMD_SET_SER_TRIGGER = 0x81, MCU_EXT_CMD_SCS_CTRL = 0x82, MCU_EXT_CMD_RATE_CTRL = 0x87, MCU_EXT_CMD_FW_DBG_CTRL = 0x95, MCU_EXT_CMD_SET_RDD_TH = 0x9d, MCU_EXT_CMD_SET_SPR = 0xa8, + MCU_EXT_CMD_PHY_STAT_INFO = 0xad, }; enum { @@ -247,6 +269,14 @@ enum { EE_FORMAT_MULTIPLE, }; +enum { + MCU_PHY_STATE_TX_RATE, + MCU_PHY_STATE_RX_RATE, + MCU_PHY_STATE_RSSI, + MCU_PHY_STATE_CONTENTION_RX_RATE, + MCU_PHY_STATE_OFDMLQ_CNINFO, +}; + #define STA_TYPE_STA BIT(0) #define STA_TYPE_AP BIT(1) #define STA_TYPE_ADHOC BIT(2) @@ -354,15 +384,6 @@ struct bss_info_ext_bss { u8 rsv[8]; } __packed; -struct bss_info_sync_mode { - __le16 tag; - __le16 len; - __le16 bcn_interval; - u8 enable; - u8 dtim_period; - u8 rsv[8]; -} __packed; - struct bss_info_bmc_rate { __le16 tag; __le16 len; @@ -480,7 +501,7 @@ enum { BSS_INFO_LQ_RM, /* obsoleted */ BSS_INFO_EXT_BSS, BSS_INFO_BMC_RATE, /* for bmc rate control in CR4 */ - BSS_INFO_SYNC_MODE, + BSS_INFO_SYNC_MODE, /* obsoleted */ BSS_INFO_RA, BSS_INFO_HW_AMSDU, BSS_INFO_BSS_COLOR, @@ -551,6 +572,15 @@ struct wtbl_vht { u8 rsv[4]; } __packed; +struct wtbl_hdr_trans { + __le16 tag; + __le16 len; + u8 to_ds; + u8 from_ds; + u8 no_rx_trans; + u8 _rsv; +}; + enum { MT_BA_TYPE_INVALID, MT_BA_TYPE_ORIGINATOR, @@ -972,6 +1002,7 @@ enum { sizeof(struct wtbl_rx) + \ sizeof(struct wtbl_ht) + \ sizeof(struct wtbl_vht) + \ + sizeof(struct wtbl_hdr_trans) +\ sizeof(struct wtbl_ba) + \ sizeof(struct wtbl_smps)) @@ -997,8 +1028,7 @@ enum { sizeof(struct bss_info_hw_amsdu) +\ sizeof(struct bss_info_he) + \ sizeof(struct bss_info_bmc_rate) +\ - sizeof(struct bss_info_ext_bss) +\ - sizeof(struct bss_info_sync_mode)) + sizeof(struct bss_info_ext_bss)) #define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct bss_info_bcn_csa) + \ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 4b8908fa7eda..0339abf360d3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -9,7 +9,7 @@ #include "../mt76.h" #include "regs.h" -#define MT7915_MAX_INTERFACES 4 +#define MT7915_MAX_INTERFACES 32 #define MT7915_MAX_WMM_SETS 4 #define MT7915_WTBL_SIZE 288 #define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1) @@ -32,6 +32,7 @@ #define MT7915_EEPROM_SIZE 3584 #define MT7915_TOKEN_SIZE 8192 +#define MT7915_TOKEN_FREE_THR 64 #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ @@ -112,8 +113,10 @@ struct mt7915_phy { struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES]; + struct ieee80211_vif *monitor_vif; + u32 rxfilter; - u32 omac_mask; + u64 omac_mask; u16 noise; u16 chainmask; @@ -159,11 +162,27 @@ struct mt7915_dev { u32 hw_pattern; spinlock_t token_lock; + int token_count; struct idr token; s8 **rate_power; /* TODO: use mt76_rate_power */ + bool dbdc_support; bool fw_debug; + +#ifdef CONFIG_NL80211_TESTMODE + struct { + u32 *reg_backup; + + s32 last_freq_offset; + u8 last_rcpi[4]; + s8 last_ib_rssi[4]; + s8 last_wb_rssi[4]; + u8 
last_snr; + + u8 spe_idx; + } test; +#endif }; enum { @@ -171,24 +190,13 @@ enum { HW_BSSID_1, HW_BSSID_2, HW_BSSID_3, - HW_BSSID_MAX, + HW_BSSID_MAX = HW_BSSID_3, EXT_BSSID_START = 0x10, EXT_BSSID_1, - EXT_BSSID_2, - EXT_BSSID_3, - EXT_BSSID_4, - EXT_BSSID_5, - EXT_BSSID_6, - EXT_BSSID_7, - EXT_BSSID_8, - EXT_BSSID_9, - EXT_BSSID_10, - EXT_BSSID_11, - EXT_BSSID_12, - EXT_BSSID_13, - EXT_BSSID_14, - EXT_BSSID_15, - EXT_BSSID_END + EXT_BSSID_15 = 0x1f, + EXT_BSSID_MAX = EXT_BSSID_15, + REPEATER_BSSID_START = 0x20, + REPEATER_BSSID_MAX = 0x3f, }; enum { @@ -264,15 +272,14 @@ static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac) extern const struct ieee80211_ops mt7915_ops; extern struct pci_driver mt7915_pci_driver; +extern const struct mt76_testmode_ops mt7915_testmode_ops; u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr); int mt7915_register_device(struct mt7915_dev *dev); void mt7915_unregister_device(struct mt7915_dev *dev); -int mt7915_register_ext_phy(struct mt7915_dev *dev); -void mt7915_unregister_ext_phy(struct mt7915_dev *dev); int mt7915_eeprom_init(struct mt7915_dev *dev); -u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset); +void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy); int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, struct ieee80211_channel *chan, u8 chain_idx); @@ -281,7 +288,7 @@ int mt7915_dma_init(struct mt7915_dev *dev); void mt7915_dma_prefetch(struct mt7915_dev *dev); void mt7915_dma_cleanup(struct mt7915_dev *dev); int mt7915_mcu_init(struct mt7915_dev *dev); -int mt7915_mcu_add_dev_info(struct mt7915_dev *dev, +int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, bool enable); int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, struct ieee80211_vif *vif, int enable); @@ -289,6 +296,9 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable); int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable); +int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, struct ieee80211_ampdu_params *params, bool add); @@ -306,6 +316,7 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); +int mt7915_set_channel(struct mt7915_phy *phy); int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd); int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif); int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev, @@ -314,6 +325,8 @@ int mt7915_mcu_set_eeprom(struct mt7915_dev *dev); int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset); int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable, bool hdr_trans); +int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, + u8 en); int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable); int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band); int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val); @@ -327,8 +340,10 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, const struct mt7915_dfs_pulse *pulse); int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, const struct mt7915_dfs_pattern *pattern); -int mt7915_mcu_get_rate_info(struct mt7915_dev *dev, u32 cmd, u16 
wlan_idx); int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index); +int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx); +int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct rate_info *rate); int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd, u8 index, u8 rx_sel, u8 val); int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl); @@ -428,11 +443,13 @@ mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val) bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask); void mt7915_mac_reset_counters(struct mt7915_phy *phy); void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy); +void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy); void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, bool beacon); void mt7915_mac_set_timing(struct mt7915_phy *phy); int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb); +void mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb); void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb); int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -446,6 +463,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info); void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); +int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc); void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index fe62b4d853e4..aeb86fbea41c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -21,8 +21,14 @@ static void mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) { struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + static const u32 rx_irq_mask[] = { + [MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0, + [MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1, + [MT_RXQ_MCU] = MT_INT_RX_DONE_WM, + [MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA, + }; - mt7915_irq_enable(dev, MT_INT_RX_DONE(q)); + mt7915_irq_enable(dev, rx_irq_mask[q]); } /* TODO: support 2/4/6/8 MSI-X vectors */ @@ -49,14 +55,17 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance) if (intr & MT_INT_TX_DONE_MCU) napi_schedule(&dev->mt76.tx_napi); - if (intr & MT_INT_RX_DONE_DATA) - napi_schedule(&dev->mt76.napi[0]); + if (intr & MT_INT_RX_DONE_DATA0) + napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]); + + if (intr & MT_INT_RX_DONE_DATA1) + napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]); if (intr & MT_INT_RX_DONE_WM) - napi_schedule(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]); if (intr & MT_INT_RX_DONE_WA) - napi_schedule(&dev->mt76.napi[2]); + napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]); if (intr & MT_INT_MCU_CMD) { u32 val = mt76_rr(dev, MT_MCU_CMD); @@ -140,7 +149,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, dev = container_of(mdev, struct mt7915_dev, mt76); ret = mt7915_alloc_device(pdev, dev); if (ret) - return ret; + goto error; mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); mdev->rev = (mt7915_l1_rr(dev, MT_HW_CHIPID) << 16) | @@ -163,7 +172,8 @@ static 
int mt7915_pci_probe(struct pci_dev *pdev, return 0; error: - ieee80211_free_hw(mt76_hw(dev)); + mt76_free_device(&dev->mt76); + return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index 64327153b7fa..848703e6eb7c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -51,6 +51,9 @@ #define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000) #define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs)) +#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0) +#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25) + #define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090) #define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094) #define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) @@ -67,11 +70,13 @@ #define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) #define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) -/* DMA Band 0 */ -#define MT_WF_DMA_BASE 0x21e00 -#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs)) +#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c) +#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) + +#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00) +#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs)) -#define MT_DMA_DCR0 MT_WF_DMA(0x000) +#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000) #define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3) #define MT_DMA_DCR0_RXD_G5_EN BIT(23) @@ -166,10 +171,33 @@ #define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800) #define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs)) +#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4) +#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4) +#define MT_AGG_PCR0_MM_PROT BIT(0) +#define MT_AGG_PCR0_GF_PROT BIT(1) +#define MT_AGG_PCR0_BW20_PROT BIT(2) +#define MT_AGG_PCR0_BW40_PROT BIT(4) +#define MT_AGG_PCR0_BW80_PROT BIT(6) +#define MT_AGG_PCR0_ERP_PROT GENMASK(12, 8) +#define MT_AGG_PCR0_VHT_PROT BIT(13) +#define MT_AGG_PCR0_PTA_WIN_DIS BIT(15) + +#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23) +#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0) + #define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084) #define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0) #define MT_AGG_ACR_BAR_RATE GENMASK(29, 16) +#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098) +#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12) +#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6) +#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7) +#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24) + +#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0) +#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4) + /* ARB: band 0(0x20c00), band 1(0xa0c00) */ #define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00) #define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs)) @@ -178,6 +206,8 @@ #define MT_ARB_SCR_TX_DISABLE BIT(8) #define MT_ARB_SCR_RX_DISABLE BIT(9) +#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4) + /* RMAC: band 0(0x21400), band 1(0xa1400) */ #define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400) #define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs)) @@ -308,11 +338,11 @@ #define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10) #define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14) -#define MT_INT_RX_DONE_DATA BIT(16) +#define MT_INT_RX_DONE_DATA0 BIT(16) +#define MT_INT_RX_DONE_DATA1 BIT(17) #define MT_INT_RX_DONE_WM BIT(0) #define MT_INT_RX_DONE_WA BIT(1) -#define MT_INT_RX_DONE(_n) ((_n) ? 
BIT((_n) - 1) : BIT(16)) -#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16)) +#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | GENMASK(17, 16)) #define MT_INT_TX_DONE_MCU_WA BIT(15) #define MT_INT_TX_DONE_FWDL BIT(26) #define MT_INT_TX_DONE_MCU_WM BIT(27) @@ -385,11 +415,19 @@ #define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs)) #define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188) +#define MT_WF_IRPI_BASE 0x83006000 +#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16)) + /* PHY: band 0(0x83080000), band 1(0x83090000) */ #define MT_WF_PHY_BASE 0x83080000 #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) #define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16)) +#define MT_WF_PHY_RX_CTRL1_IPI_EN GENMASK(2, 0) #define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9) +#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16)) +#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18) +#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29) + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c new file mode 100644 index 000000000000..9ee82e2d262c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#include "mt7915.h" +#include "mac.h" +#include "mcu.h" +#include "testmode.h" + +enum { + TM_CHANGED_TXPOWER, + TM_CHANGED_FREQ_OFFSET, + + /* must be last */ + NUM_TM_CHANGED +}; + +static const u8 tm_change_map[] = { + [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER, + [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET, +}; + +struct reg_band { + u32 band[2]; +}; + +#define REG_BAND(_reg) \ + { .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) } +#define REG_BAND_IDX(_reg, _idx) \ + { .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) } + +static const struct reg_band reg_backup_list[] = { + REG_BAND_IDX(AGG_PCR0, 0), + REG_BAND_IDX(AGG_PCR0, 1), + REG_BAND_IDX(AGG_AWSCR0, 0), + REG_BAND_IDX(AGG_AWSCR0, 1), + REG_BAND_IDX(AGG_AWSCR0, 2), + REG_BAND_IDX(AGG_AWSCR0, 3), + REG_BAND(AGG_MRCR), + REG_BAND(TMAC_TFCR0), + REG_BAND(TMAC_TCR0), + REG_BAND(AGG_ATCR1), + REG_BAND(AGG_ATCR3), + REG_BAND(TMAC_TRCR0), + REG_BAND(TMAC_ICR0), + REG_BAND_IDX(ARB_DRNGR0, 0), + REG_BAND_IDX(ARB_DRNGR0, 1), + REG_BAND(WF_RFCR), + REG_BAND(WF_RFCR1), +}; + +static int +mt7915_tm_set_tx_power(struct mt7915_phy *phy) +{ + struct mt7915_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + struct cfg80211_chan_def *chandef = &mphy->chandef; + int freq = chandef->center_freq1; + int ret; + struct { + u8 format_id; + u8 dbdc_idx; + s8 tx_power; + u8 ant_idx; /* Only 0 is valid */ + u8 center_chan; + u8 rsv[3]; + } __packed req = { + .format_id = 0xf, + .dbdc_idx = phy != &dev->phy, + .center_chan = ieee80211_frequency_to_channel(freq), + }; + u8 *tx_power = NULL; + + if (dev->mt76.test.state != MT76_TM_STATE_OFF) + tx_power = dev->mt76.test.tx_power; + + /* Tx power of the other antennas are the same as antenna 0 */ + if (tx_power && tx_power[0]) + req.tx_power = tx_power[0]; + + ret = mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, + &req, sizeof(req), false); + + return ret; +} + +static int +mt7915_tm_set_freq_offset(struct mt7915_dev *dev, bool en, u32 val) +{ + struct mt7915_tm_cmd req = { + .testmode_en = en, + .param_idx = MCU_ATE_SET_FREQ_OFFSET, + .param.freq.freq_offset = cpu_to_le32(val), + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, + sizeof(req), false); +} + +static int 
+mt7915_tm_mode_ctrl(struct mt7915_dev *dev, bool enable) +{ + struct { + u8 format_id; + bool enable; + u8 rsv[2]; + } __packed req = { + .format_id = 0x6, + .enable = enable, + }; + + return mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_TX_POWER_FEATURE_CTRL, + &req, sizeof(req), false); +} + +static int +mt7915_tm_set_trx(struct mt7915_dev *dev, struct mt7915_phy *phy, + int type, bool en) +{ + struct mt7915_tm_cmd req = { + .testmode_en = 1, + .param_idx = MCU_ATE_SET_TRX, + .param.trx.type = type, + .param.trx.enable = en, + .param.trx.band = phy != &dev->phy, + }; + + return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, + sizeof(req), false); +} + +static void +mt7915_tm_reg_backup_restore(struct mt7915_dev *dev, struct mt7915_phy *phy) +{ + int n_regs = ARRAY_SIZE(reg_backup_list); + bool ext_phy = phy != &dev->phy; + u32 *b = dev->test.reg_backup; + int i; + + if (dev->mt76.test.state == MT76_TM_STATE_OFF) { + for (i = 0; i < n_regs; i++) + mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]); + return; + } + + if (b) + return; + + b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL); + if (!b) + return; + + dev->test.reg_backup = b; + for (i = 0; i < n_regs; i++) + b[i] = mt76_rr(dev, reg_backup_list[i].band[ext_phy]); + + mt76_clear(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_MM_PROT | + MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT | + MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT | + MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT); + mt76_set(dev, MT_AGG_PCR0(ext_phy, 0), MT_AGG_PCR0_PTA_WIN_DIS); + + mt76_wr(dev, MT_AGG_PCR0(ext_phy, 1), MT_AGG_PCR1_RTS0_NUM_THRES | + MT_AGG_PCR1_RTS0_LEN_THRES); + + mt76_clear(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_BAR_CNT_LIMIT | + MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT | + MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT); + + mt76_rmw(dev, MT_AGG_MRCR(ext_phy), MT_AGG_MRCR_RTS_FAIL_LIMIT | + MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, + FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) | + FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1)); + + mt76_wr(dev, MT_TMAC_TFCR0(ext_phy), 0); + mt76_clear(dev, MT_TMAC_TCR0(ext_phy), MT_TMAC_TCR0_TBTT_STOP_CTRL); + + /* config rx filter for testmode rx */ + mt76_wr(dev, MT_WF_RFCR(ext_phy), 0xcf70a); + mt76_wr(dev, MT_WF_RFCR1(ext_phy), 0); +} + +static void +mt7915_tm_init(struct mt7915_dev *dev) +{ + bool en = !(dev->mt76.test.state == MT76_TM_STATE_OFF); + + if (!test_bit(MT76_STATE_RUNNING, &dev->phy.mt76->state)) + return; + + mt7915_tm_mode_ctrl(dev, en); + mt7915_tm_reg_backup_restore(dev, &dev->phy); + mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_TXRX, !en); +} + +static void +mt7915_tm_set_tx_frames(struct mt7915_dev *dev, bool en) +{ + static const u8 spe_idx_map[] = {0, 0, 1, 0, 3, 2, 4, 0, + 9, 8, 6, 10, 16, 12, 18, 0}; + struct sk_buff *skb = dev->mt76.test.tx_skb; + struct ieee80211_tx_info *info; + + mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_RX_RXV, false); + + if (en) { + u8 tx_ant = dev->mt76.test.tx_antenna_mask; + + mutex_unlock(&dev->mt76.mutex); + mt7915_set_channel(&dev->phy); + mutex_lock(&dev->mt76.mutex); + + mt7915_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH); + dev->test.spe_idx = spe_idx_map[tx_ant]; + } + + mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_TX, en); + + if (!en || !skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = dev->phy.monitor_vif; +} + +static void +mt7915_tm_set_rx_frames(struct mt7915_dev *dev, bool en) +{ + if (en) { + mutex_unlock(&dev->mt76.mutex); + mt7915_set_channel(&dev->phy); + mutex_lock(&dev->mt76.mutex); + + 
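/*
 * Reviewer note (annotation, not part of the patch): mt7915_set_channel()
 * appears to take dev->mt76.mutex itself, so these testmode paths, which
 * already hold it, drop the lock around the call and re-take it before
 * reprogramming the RX path. With MCU_EXT_CMD_SET_RX_PATH, the
 * CONFIG_NL80211_TESTMODE hunk added to mt7915_mcu_set_chan_info() earlier
 * in this patch then applies the testmode antenna mask while TX/RX frames
 * are active:
 *
 *	req.tx_streams_num = fls(dev->mt76.test.tx_antenna_mask);
 *	req.rx_streams = dev->mt76.test.tx_antenna_mask;
 */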
mt7915_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH); + } + + mt7915_tm_set_trx(dev, &dev->phy, TM_MAC_RX_RXV, en); +} + +static void +mt7915_tm_update_params(struct mt7915_dev *dev, u32 changed) +{ + struct mt76_testmode_data *td = &dev->mt76.test; + bool en = dev->mt76.test.state != MT76_TM_STATE_OFF; + + if (changed & BIT(TM_CHANGED_FREQ_OFFSET)) + mt7915_tm_set_freq_offset(dev, en, en ? td->freq_offset : 0); + if (changed & BIT(TM_CHANGED_TXPOWER)) + mt7915_tm_set_tx_power(&dev->phy); +} + +static int +mt7915_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt76_testmode_data *td = &mdev->test; + enum mt76_testmode_state prev_state = td->state; + + mdev->test.state = state; + + if (prev_state == MT76_TM_STATE_TX_FRAMES) + mt7915_tm_set_tx_frames(dev, false); + else if (state == MT76_TM_STATE_TX_FRAMES) + mt7915_tm_set_tx_frames(dev, true); + else if (prev_state == MT76_TM_STATE_RX_FRAMES) + mt7915_tm_set_rx_frames(dev, false); + else if (state == MT76_TM_STATE_RX_FRAMES) + mt7915_tm_set_rx_frames(dev, true); + else if (prev_state == MT76_TM_STATE_OFF || state == MT76_TM_STATE_OFF) + mt7915_tm_init(dev); + + if ((state == MT76_TM_STATE_IDLE && + prev_state == MT76_TM_STATE_OFF) || + (state == MT76_TM_STATE_OFF && + prev_state == MT76_TM_STATE_IDLE)) { + u32 changed = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + u16 cur = tm_change_map[i]; + + if (td->param_set[cur / 32] & BIT(cur % 32)) + changed |= BIT(i); + } + + mt7915_tm_update_params(dev, changed); + } + + return 0; +} + +static int +mt7915_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb, + enum mt76_testmode_state new_state) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt76_testmode_data *td = &dev->mt76.test; + u32 changed = 0; + int i; + + BUILD_BUG_ON(NUM_TM_CHANGED >= 32); + + if (new_state == MT76_TM_STATE_OFF || + td->state == MT76_TM_STATE_OFF) + return 0; + + if (td->tx_antenna_mask & ~dev->phy.chainmask) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + if (tb[tm_change_map[i]]) + changed |= BIT(i); + } + + mt7915_tm_update_params(dev, changed); + + return 0; +} + +static int +mt7915_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg) +{ + struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + void *rx, *rssi; + int i; + + rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX); + if (!rx) + return -ENOMEM; + + if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset)) + return -ENOMEM; + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++) + if (nla_put_u8(msg, i, dev->test.last_rcpi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_IB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_ib_rssi); i++) + if (nla_put_s8(msg, i, dev->test.last_ib_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_WB_RSSI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_wb_rssi); i++) + if (nla_put_s8(msg, i, dev->test.last_wb_rssi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + if (nla_put_u8(msg, MT76_TM_RX_ATTR_SNR, dev->test.last_snr)) + return -ENOMEM; + + nla_nest_end(msg, rx); + + return 0; +} + +const struct mt76_testmode_ops 
mt7915_testmode_ops = { + .set_state = mt7915_tm_set_state, + .set_params = mt7915_tm_set_params, + .dump_stats = mt7915_tm_dump_stats, +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h new file mode 100644 index 000000000000..964f2d7fde3a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. */ + +#ifndef __MT7915_TESTMODE_H +#define __MT7915_TESTMODE_H + +struct mt7915_tm_trx { + u8 type; + u8 enable; + u8 band; + u8 rsv; +}; + +struct mt7915_tm_freq_offset { + u8 band; + __le32 freq_offset; +}; + +struct mt7915_tm_cmd { + u8 testmode_en; + u8 param_idx; + u8 _rsv[2]; + union { + __le32 data; + struct mt7915_tm_trx trx; + struct mt7915_tm_freq_offset freq; + u8 test[72]; + } param; +} __packed; + +enum { + TM_MAC_TX = 1, + TM_MAC_RX, + TM_MAC_TXRX, + TM_MAC_TXRX_RXV, + TM_MAC_RXV, + TM_MAC_RX_RXV, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 9a4d95a2a707..62b5b912818f 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -36,46 +36,49 @@ mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) return 0; } -static int mt76s_alloc_tx(struct mt76_dev *dev) +static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev) { struct mt76_queue *q; - int i; - - for (i = 0; i < MT_TXQ_MCU_WA; i++) { - q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); - if (!q) - return -ENOMEM; - spin_lock_init(&q->lock); - q->hw_idx = i; - dev->q_tx[i] = q; + q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); + if (!q) + return ERR_PTR(-ENOMEM); - q->entry = devm_kcalloc(dev->dev, - MT_NUM_TX_ENTRIES, sizeof(*q->entry), - GFP_KERNEL); - if (!q->entry) - return -ENOMEM; + spin_lock_init(&q->lock); + q->entry = devm_kcalloc(dev->dev, + MT_NUM_TX_ENTRIES, sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return ERR_PTR(-ENOMEM); - q->ndesc = MT_NUM_TX_ENTRIES; - } + q->ndesc = MT_NUM_TX_ENTRIES; - return 0; + return q; } -void mt76s_stop_txrx(struct mt76_dev *dev) +static int mt76s_alloc_tx(struct mt76_dev *dev) { - struct mt76_sdio *sdio = &dev->sdio; + struct mt76_queue *q; + int i; - cancel_work_sync(&sdio->tx.xmit_work); - cancel_work_sync(&sdio->tx.status_work); - cancel_work_sync(&sdio->rx.recv_work); - cancel_work_sync(&sdio->rx.net_work); - cancel_work_sync(&sdio->stat_work); - clear_bit(MT76_READING_STATS, &dev->phy.state); + for (i = 0; i <= MT_TXQ_PSD; i++) { + q = mt76s_alloc_tx_queue(dev); + if (IS_ERR(q)) + return PTR_ERR(q); - mt76_tx_status_check(dev, NULL, true); + q->qid = i; + dev->phy.q_tx[i] = q; + } + + q = mt76s_alloc_tx_queue(dev); + if (IS_ERR(q)) + return PTR_ERR(q); + + q->qid = MT_MCUQ_WM; + dev->q_mcu[MT_MCUQ_WM] = q; + + return 0; } -EXPORT_SYMBOL_GPL(mt76s_stop_txrx); int mt76s_alloc_queues(struct mt76_dev *dev) { @@ -131,11 +134,32 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) return nframes; } -static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) +static void mt76s_net_worker(struct mt76_worker *w) +{ + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + net_worker); + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); + int i, nframes; + + do { + nframes = 0; + + local_bh_disable(); + rcu_read_lock(); + + mt76_for_each_q_rx(dev, i) + nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]); + 
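/*
 * Reviewer note: this do/while keeps draining the rx queues until a full
 * pass makes no progress (nframes == 0), so a burst of frames no longer
 * requires re-queueing a work item. The workqueue items removed in this
 * hunk (tx.xmit_work/status_work, rx.net_work) become dedicated
 * kthread-backed workers; their lifecycle, using only calls visible in
 * this patch (see mt76s_init()/mt76s_deinit() below), is roughly:
 *
 *	err = mt76_worker_setup(dev->hw, &sdio->net_worker,
 *				mt76s_net_worker, "sdio-net"); // spawn kthread
 *	mt76_worker_schedule(&sdio->txrx_worker);  // kick, e.g. from tx path
 *	mt76_worker_teardown(&sdio->net_worker);   // stop + join on deinit
 */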
+ rcu_read_unlock(); + local_bh_enable(); + } while (nframes > 0); +} + +static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q) { - struct mt76_queue *q = dev->q_tx[qid]; + bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM]; struct mt76_queue_entry entry; - bool wake; + int nframes = 0; while (q->queued > 0) { if (!q->entry[q->tail].done) @@ -144,12 +168,13 @@ static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) entry = q->entry[q->tail]; q->entry[q->tail].done = false; - if (qid == MT_TXQ_MCU) { + if (mcu) { dev_kfree_skb(entry.skb); entry.skb = NULL; } mt76_queue_tx_complete(dev, q, &entry); + nframes++; } wake = q->stopped && q->queued < q->ndesc - 8; @@ -159,13 +184,35 @@ static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) if (!q->queued) wake_up(&dev->tx_wait); - if (qid == MT_TXQ_MCU) - return; + if (mcu) + goto out; - mt76_txq_schedule(&dev->phy, qid); + mt76_txq_schedule(&dev->phy, q->qid); if (wake) - ieee80211_wake_queue(dev->hw, qid); + ieee80211_wake_queue(dev->hw, q->qid); +out: + return nframes; +} + +static void mt76s_status_worker(struct mt76_worker *w) +{ + struct mt76_sdio *sdio = container_of(w, struct mt76_sdio, + status_worker); + struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); + int i, nframes; + + do { + nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]); + + for (i = 0; i <= MT_TXQ_PSD; i++) + nframes += mt76s_process_tx_queue(dev, + dev->phy.q_tx[i]); + + if (dev->drv->tx_status_data && + !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) + queue_work(dev->wq, &dev->sdio.stat_work); + } while (nframes > 0); } static void mt76s_tx_status_data(struct work_struct *work) @@ -194,11 +241,10 @@ static void mt76s_tx_status_data(struct work_struct *work) } static int -mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { - struct mt76_queue *q = dev->q_tx[qid]; struct mt76_tx_info tx_info = { .skb = skb, }; @@ -209,7 +255,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, return -ENOSPC; skb->prev = skb->next = NULL; - err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); + err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info); if (err < 0) return err; @@ -222,10 +268,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, } static int -mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, u32 tx_info) { - struct mt76_queue *q = dev->q_tx[qid]; int ret = -ENOSPC, len = skb->len, pad; if (q->queued == q->ndesc) @@ -257,7 +302,7 @@ static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) { struct mt76_sdio *sdio = &dev->sdio; - queue_work(sdio->txrx_wq, &sdio->tx.xmit_work); + mt76_worker_schedule(&sdio->txrx_worker); } static const struct mt76_queue_ops sdio_queue_ops = { @@ -266,49 +311,19 @@ static const struct mt76_queue_ops sdio_queue_ops = { .tx_queue_skb_raw = mt76s_tx_queue_skb_raw, }; -static void mt76s_tx_work(struct work_struct *work) -{ - struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, - tx.status_work); - struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); - int i; - - for (i = 0; i < MT_TXQ_MCU_WA; i++) - mt76s_process_tx_queue(dev, i); - - if (dev->drv->tx_status_data && - !test_and_set_bit(MT76_READING_STATS, 
&dev->phy.state)) - queue_work(dev->wq, &dev->sdio.stat_work); -} - -static void mt76s_rx_work(struct work_struct *work) -{ - struct mt76_sdio *sdio = container_of(work, struct mt76_sdio, - rx.net_work); - struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio); - int i; - - /* rx processing */ - local_bh_disable(); - rcu_read_lock(); - - mt76_for_each_q_rx(dev, i) - mt76s_process_rx_queue(dev, &dev->q_rx[i]); - - rcu_read_unlock(); - local_bh_enable(); -} - void mt76s_deinit(struct mt76_dev *dev) { struct mt76_sdio *sdio = &dev->sdio; int i; - mt76s_stop_txrx(dev); - if (sdio->txrx_wq) { - destroy_workqueue(sdio->txrx_wq); - sdio->txrx_wq = NULL; - } + mt76_worker_teardown(&sdio->txrx_worker); + mt76_worker_teardown(&sdio->status_worker); + mt76_worker_teardown(&sdio->net_worker); + + cancel_work_sync(&sdio->stat_work); + clear_bit(MT76_READING_STATS, &dev->phy.state); + + mt76_tx_status_check(dev, NULL, true); sdio_claim_host(sdio->func); sdio_release_irq(sdio->func); @@ -335,18 +350,23 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, const struct mt76_bus_ops *bus_ops) { struct mt76_sdio *sdio = &dev->sdio; + int err; - sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq", - WQ_UNBOUND | WQ_HIGHPRI, - WQ_UNBOUND_MAX_ACTIVE); - if (!sdio->txrx_wq) - return -ENOMEM; + err = mt76_worker_setup(dev->hw, &sdio->status_worker, + mt76s_status_worker, "sdio-status"); + if (err) + return err; + + err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker, + "sdio-net"); + if (err) + return err; + + sched_set_fifo_low(sdio->status_worker.task); + sched_set_fifo_low(sdio->net_worker.task); INIT_WORK(&sdio->stat_work, mt76s_tx_status_data); - INIT_WORK(&sdio->tx.status_work, mt76s_tx_work); - INIT_WORK(&sdio->rx.net_work, mt76s_rx_work); - mutex_init(&sdio->sched.lock); dev->queue_ops = &sdio_queue_ops; dev->bus = bus_ops; dev->sdio.func = func; diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c index 883f59c7a7e4..581eb56dc4be 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.c +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -11,6 +11,8 @@ static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_RATE_STBC] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_LTF] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 }, [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED }, @@ -21,6 +23,7 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev) { struct mt76_testmode_data *td = &dev->test; struct mt76_wcid *wcid = &dev->global_wcid; + struct mt76_phy *phy = &dev->phy; struct sk_buff *skb = td->tx_skb; struct mt76_queue *q; int qid; @@ -29,7 +32,7 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev) return; qid = skb_get_queue_mapping(skb); - q = dev->q_tx[qid]; + q = phy->q_tx[qid]; spin_lock_bh(&q->lock); @@ -37,7 +40,8 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev) q->queued < q->ndesc / 2) { int ret; - ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL); + ret = dev->queue_ops->tx_queue_skb(dev, q, skb_get(skb), wcid, + NULL); if (ret < 0) break; @@ -55,13 +59,14 @@ static int mt76_testmode_tx_init(struct mt76_dev *dev) { struct mt76_testmode_data *td = &dev->test; + struct mt76_phy *phy = &dev->phy; struct ieee80211_tx_info *info; struct 
ieee80211_hdr *hdr; struct sk_buff *skb; u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_FROMDS; struct ieee80211_tx_rate *rate; - u8 max_nss = hweight8(dev->phy.antenna_mask); + u8 max_nss = hweight8(phy->antenna_mask); if (td->tx_antenna_mask) max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); @@ -74,28 +79,32 @@ mt76_testmode_tx_init(struct mt76_dev *dev) td->tx_skb = skb; hdr = __skb_put_zero(skb, td->tx_msdu_len); hdr->frame_control = cpu_to_le16(fc); - memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr)); - memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr)); - memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr)); + memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr)); + memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr)); + memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr)); info = IEEE80211_SKB_CB(skb); info->flags = IEEE80211_TX_CTL_INJECTED | IEEE80211_TX_CTL_NO_ACK | IEEE80211_TX_CTL_NO_PS_BUFFER; + + if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT) + goto out; + rate = &info->control.rates[0]; rate->count = 1; rate->idx = td->tx_rate_idx; switch (td->tx_rate_mode) { case MT76_TM_TX_MODE_CCK: - if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) + if (phy->chandef.chan->band != NL80211_BAND_2GHZ) return -EINVAL; if (rate->idx > 4) return -EINVAL; break; case MT76_TM_TX_MODE_OFDM: - if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) + if (phy->chandef.chan->band != NL80211_BAND_2GHZ) break; if (rate->idx > 8) @@ -106,7 +115,7 @@ mt76_testmode_tx_init(struct mt76_dev *dev) case MT76_TM_TX_MODE_HT: if (rate->idx > 8 * max_nss && !(rate->idx == 32 && - dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40)) + phy->chandef.width >= NL80211_CHAN_WIDTH_40)) return -EINVAL; rate->flags |= IEEE80211_TX_RC_MCS; @@ -131,8 +140,11 @@ mt76_testmode_tx_init(struct mt76_dev *dev) if (td->tx_rate_ldpc) info->flags |= IEEE80211_TX_CTL_LDPC; + if (td->tx_rate_stbc) + info->flags |= IEEE80211_TX_CTL_STBC; + if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) { - switch (dev->phy.chandef.width) { + switch (phy->chandef.width) { case NL80211_CHAN_WIDTH_40: rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; break; @@ -147,7 +159,7 @@ mt76_testmode_tx_init(struct mt76_dev *dev) break; } } - +out: skb_set_queue_mapping(skb, IEEE80211_AC_BE); return 0; @@ -334,8 +346,10 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 0, MT76_TM_TX_MODE_MAX) || mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss, 1, hweight8(phy->antenna_mask)) || - mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 2) || mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_STBC], &td->tx_rate_stbc, 0, 1) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_LTF], &td->tx_ltf, 0, 2) || mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1, phy->antenna_mask) || mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL], @@ -472,6 +486,9 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) || nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) || nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) || + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_STBC, td->tx_rate_stbc) || + (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_LTF) && + nla_put_u8(msg, MT76_TM_ATTR_TX_LTF, td->tx_ltf)) || (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) && 
nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) || (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) && diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h index 691fe5773244..7efad685a17c 100644 --- a/drivers/net/wireless/mediatek/mt76/testmode.h +++ b/drivers/net/wireless/mediatek/mt76/testmode.h @@ -25,6 +25,8 @@ * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8) * @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8) * @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8) + * @MT76_TM_ATTR_TX_RATE_STBC: packet tx enable STBC (u8) + * @MT76_TM_ATTR_TX_LTF: packet tx LTF, set 0 to 2 for 1x, 2x, and 4x LTF (u8) * * @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8) * @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8) @@ -50,6 +52,8 @@ enum mt76_testmode_attr { MT76_TM_ATTR_TX_RATE_IDX, MT76_TM_ATTR_TX_RATE_SGI, MT76_TM_ATTR_TX_RATE_LDPC, + MT76_TM_ATTR_TX_RATE_STBC, + MT76_TM_ATTR_TX_LTF, MT76_TM_ATTR_TX_ANTENNA, MT76_TM_ATTR_TX_POWER_CONTROL, @@ -99,8 +103,9 @@ enum mt76_testmode_stats_attr { * * @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32) * @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8) - * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8) - * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8) + * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (array, s8) + * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (array, s8) + * @MT76_TM_RX_ATTR_SNR: signal-to-noise ratio (u8) */ enum mt76_testmode_rx_attr { MT76_TM_RX_ATTR_UNSPEC, @@ -109,6 +114,7 @@ enum mt76_testmode_rx_attr { MT76_TM_RX_ATTR_RCPI, MT76_TM_RX_ATTR_IB_RSSI, MT76_TM_RX_ATTR_WB_RSSI, + MT76_TM_RX_ATTR_SNR, /* keep last */ NUM_MT76_TM_RX_ATTRS, @@ -141,12 +147,20 @@ enum mt76_testmode_state { * @MT76_TM_TX_MODE_OFDM: legacy OFDM mode * @MT76_TM_TX_MODE_HT: 802.11n MCS * @MT76_TM_TX_MODE_VHT: 802.11ac MCS + * @MT76_TM_TX_MODE_HE_SU: 802.11ax single-user MIMO + * @MT76_TM_TX_MODE_HE_EXT_SU: 802.11ax extended-range SU + * @MT76_TM_TX_MODE_HE_TB: 802.11ax trigger-based + * @MT76_TM_TX_MODE_HE_MU: 802.11ax multi-user MIMO */ enum mt76_testmode_tx_mode { MT76_TM_TX_MODE_CCK, MT76_TM_TX_MODE_OFDM, MT76_TM_TX_MODE_HT, MT76_TM_TX_MODE_VHT, + MT76_TM_TX_MODE_HE_SU, + MT76_TM_TX_MODE_HE_EXT_SU, + MT76_TM_TX_MODE_HE_TB, + MT76_TM_TX_MODE_HE_MU, /* keep last */ NUM_MT76_TM_TX_MODES, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 44ef4bc7a46e..25627e70bdad 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -225,23 +225,23 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk EXPORT_SYMBOL_GPL(mt76_tx_complete_skb); static int -__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb, +__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta, bool *stop) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct mt76_queue *q; + struct mt76_queue *q = phy->q_tx[qid]; + struct mt76_dev *dev = phy->dev; bool non_aql; int pending; int idx; non_aql = !info->tx_time_est; - idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta); + idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); if (idx < 0 || !sta || !non_aql) return idx; wcid = (struct mt76_wcid *)sta->drv_priv; - q = dev->q_tx[qid]; q->entry[idx].wcid = wcid->idx; pending = atomic_inc_return(&wcid->non_aql_packets); 
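/* [Editor's note] __mt76_tx_queue_skb() now takes the mt76_phy so it can
 * resolve the per-phy tx queue itself, and the atomic non_aql_packets
 * counter at this point implements a per-station in-flight cap for frames
 * outside mac80211's airtime queue limits. An illustrative reduction of
 * that accounting; the threshold value and both helper names are
 * placeholders, not mt76 symbols.
 */
#include <linux/atomic.h>
#include <linux/types.h>

#define MAX_NON_AQL_PKT	16	/* stands in for MT_MAX_NON_AQL_PKT */

static void non_aql_enqueue(atomic_t *pending, bool *stop)
{
	/* ask the caller to pause the txq once too much is in flight */
	if (atomic_inc_return(pending) >= MAX_NON_AQL_PKT)
		*stop = true;
}

static void non_aql_complete(atomic_t *pending)
{
	atomic_dec(pending);	/* tx completion frees one slot */
}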
if (stop && pending >= MT_MAX_NON_AQL_PKT) @@ -272,6 +272,7 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, } if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) && !ieee80211_is_data(hdr->frame_control) && !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { qid = MT_TXQ_PSD; @@ -285,17 +286,11 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, if (ext_phy) info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; - q = dev->q_tx[qid]; + q = phy->q_tx[qid]; spin_lock_bh(&q->lock); - __mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL); + __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); dev->queue_ops->kick(dev, q); - - if (q->queued > q->ndesc - 8 && !q->stopped) { - ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb)); - q->stopped = true; - } - spin_unlock_bh(&q->lock); } EXPORT_SYMBOL_GPL(mt76_tx); @@ -320,7 +315,7 @@ mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq) } static void -mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, +mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta, struct sk_buff *skb, bool last) { struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; @@ -332,7 +327,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, IEEE80211_TX_CTL_REQ_TX_STATUS; mt76_skb_set_moredata(skb, !last); - __mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL); + __mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL); } void @@ -344,7 +339,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct mt76_phy *phy = hw->priv; struct mt76_dev *dev = phy->dev; struct sk_buff *last_skb = NULL; - struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD]; + struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD]; int i; spin_lock_bh(&hwq->lock); @@ -363,14 +358,14 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, nframes--; if (last_skb) - mt76_queue_ps_skb(dev, sta, last_skb, false); + mt76_queue_ps_skb(phy, sta, last_skb, false); last_skb = skb; } while (nframes); } if (last_skb) { - mt76_queue_ps_skb(dev, sta, last_skb, true); + mt76_queue_ps_skb(phy, sta, last_skb, true); dev->queue_ops->kick(dev, hwq); } else { ieee80211_sta_eosp(sta); @@ -380,6 +375,13 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta, } EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); +static bool +mt76_txq_stopped(struct mt76_queue *q) +{ + return q->stopped || q->blocked || + q->queued + MT_TXQ_FREE_THR >= q->ndesc; +} + static int mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, struct mt76_txq *mtxq) @@ -409,7 +411,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, ieee80211_get_tx_rates(txq->vif, txq->sta, skb, info->control.rates, 1); - idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop); + idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); if (idx < 0) return idx; @@ -418,10 +420,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, test_bit(MT76_RESET, &phy->state)) return -EBUSY; - if (stop) - break; - - if (q->queued + MT_TXQ_FREE_THR >= q->ndesc) + if (stop || mt76_txq_stopped(q)) break; skb = mt76_txq_dequeue(phy, mtxq); @@ -433,7 +432,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, ieee80211_get_tx_rates(txq->vif, txq->sta, skb, info->control.rates, 1); - idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop); + idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop); if (idx < 0) 
break; @@ -448,8 +447,8 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q, static int mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) { + struct mt76_queue *q = phy->q_tx[qid]; struct mt76_dev *dev = phy->dev; - struct mt76_queue *q = dev->q_tx[qid]; struct ieee80211_txq *txq; struct mt76_txq *mtxq; struct mt76_wcid *wcid; @@ -463,7 +462,14 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid) break; } - if (q->queued + MT_TXQ_FREE_THR >= q->ndesc) + if (dev->queue_ops->tx_cleanup && + q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) { + spin_unlock_bh(&q->lock); + dev->queue_ops->tx_cleanup(dev, q, false); + spin_lock_bh(&q->lock); + } + + if (mt76_txq_stopped(q)) break; txq = ieee80211_next_txq(phy->hw, qid); @@ -538,7 +544,7 @@ void mt76_tx_worker(struct mt76_worker *w) #endif } -void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, +void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta, bool send_bar) { int i; @@ -551,7 +557,7 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, if (!txq) continue; - hwq = dev->q_tx[mt76_txq_get_qid(txq)]; + hwq = phy->q_tx[mt76_txq_get_qid(txq)]; mtxq = (struct mt76_txq *)txq->drv_priv; spin_lock_bh(&hwq->lock); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index f1ae9ff835b2..dc850109de22 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -627,7 +627,7 @@ static void mt76u_complete_rx(struct urb *urb) q->head = (q->head + 1) % q->ndesc; q->queued++; - tasklet_schedule(&dev->usb.rx_tasklet); + mt76_worker_schedule(&dev->usb.rx_worker); out: spin_unlock_irqrestore(&q->lock, flags); } @@ -665,13 +665,17 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) } mt76u_submit_rx_buf(dev, qid, urb); } - if (qid == MT_RXQ_MAIN) + if (qid == MT_RXQ_MAIN) { + local_bh_disable(); mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); + local_bh_enable(); + } } -static void mt76u_rx_tasklet(unsigned long data) +static void mt76u_rx_worker(struct mt76_worker *w) { - struct mt76_dev *dev = (struct mt76_dev *)data; + struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker); + struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); int i; rcu_read_lock(); @@ -737,8 +741,13 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) struct page *page; int i; - for (i = 0; i < q->ndesc; i++) + for (i = 0; i < q->ndesc; i++) { + if (!q->entry[i].urb) + continue; + mt76u_urb_free(q->entry[i].urb); + q->entry[i].urb = NULL; + } if (!q->rx_page.va) return; @@ -752,6 +761,8 @@ static void mt76u_free_rx(struct mt76_dev *dev) { int i; + mt76_worker_teardown(&dev->usb.rx_worker); + mt76_for_each_q_rx(dev, i) mt76u_free_rx_queue(dev, &dev->q_rx[i]); } @@ -760,6 +771,8 @@ void mt76u_stop_rx(struct mt76_dev *dev) { int i; + mt76_worker_disable(&dev->usb.rx_worker); + mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; int j; @@ -767,8 +780,6 @@ void mt76u_stop_rx(struct mt76_dev *dev) for (j = 0; j < q->ndesc; j++) usb_poison_urb(q->entry[j].urb); } - - tasklet_kill(&dev->usb.rx_tasklet); } EXPORT_SYMBOL_GPL(mt76u_stop_rx); @@ -788,20 +799,23 @@ int mt76u_resume_rx(struct mt76_dev *dev) return err; } + mt76_worker_enable(&dev->usb.rx_worker); + return 0; } EXPORT_SYMBOL_GPL(mt76u_resume_rx); -static void mt76u_tx_worker(struct mt76_worker *w) +static void mt76u_status_worker(struct mt76_worker *w) { - struct mt76_dev *dev = 
container_of(w, struct mt76_dev, tx_worker); + struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker); + struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb); struct mt76_queue_entry entry; struct mt76_queue *q; bool wake; int i; for (i = 0; i < IEEE80211_NUM_ACS; i++) { - q = dev->q_tx[i]; + q = dev->phy.q_tx[i]; while (q->queued > 0) { if (!q->entry[q->tail].done) @@ -820,7 +834,7 @@ static void mt76u_tx_worker(struct mt76_worker *w) if (!q->queued) wake_up(&dev->tx_wait); - mt76_txq_schedule(&dev->phy, i); + mt76_worker_schedule(&dev->tx_worker); if (dev->drv->tx_status_data && !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) @@ -864,7 +878,7 @@ static void mt76u_complete_tx(struct urb *urb) dev_err(dev->dev, "tx urb failed: %d\n", urb->status); e->done = true; - mt76_worker_schedule(&dev->tx_worker); + mt76_worker_schedule(&dev->usb.status_worker); } static int @@ -887,11 +901,10 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, } static int -mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, +mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { - struct mt76_queue *q = dev->q_tx[qid]; struct mt76_tx_info tx_info = { .skb = skb, }; @@ -902,7 +915,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, return -ENOSPC; skb->prev = skb->next = NULL; - err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); + err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info); if (err < 0) return err; @@ -970,7 +983,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) for (i = 0; i <= MT_TXQ_PSD; i++) { if (i >= IEEE80211_NUM_ACS) { - dev->q_tx[i] = dev->q_tx[0]; + dev->phy.q_tx[i] = dev->phy.q_tx[0]; continue; } @@ -980,7 +993,9 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) spin_lock_init(&q->lock); q->hw_idx = mt76u_ac_to_hwq(dev, i); - dev->q_tx[i] = q; + q->qid = i; + + dev->phy.q_tx[i] = q; q->entry = devm_kcalloc(dev->dev, MT_NUM_TX_ENTRIES, sizeof(*q->entry), @@ -1003,16 +1018,20 @@ static void mt76u_free_tx(struct mt76_dev *dev) { int i; + mt76_worker_teardown(&dev->usb.status_worker); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { struct mt76_queue *q; int j; - q = dev->q_tx[i]; + q = dev->phy.q_tx[i]; if (!q) continue; - for (j = 0; j < q->ndesc; j++) + for (j = 0; j < q->ndesc; j++) { usb_free_urb(q->entry[j].urb); + q->entry[j].urb = NULL; + } } } @@ -1020,6 +1039,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) { int ret; + mt76_worker_disable(&dev->usb.status_worker); + ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy), HZ / 5); if (!ret) { @@ -1030,7 +1051,7 @@ void mt76u_stop_tx(struct mt76_dev *dev) dev_err(dev->dev, "timed out waiting for pending tx\n"); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - q = dev->q_tx[i]; + q = dev->phy.q_tx[i]; if (!q) continue; @@ -1044,7 +1065,7 @@ void mt76u_stop_tx(struct mt76_dev *dev) * will fail to submit urb, cleanup those skb's manually. 
*/ for (i = 0; i < IEEE80211_NUM_ACS; i++) { - q = dev->q_tx[i]; + q = dev->phy.q_tx[i]; if (!q) continue; @@ -1061,6 +1082,8 @@ void mt76u_stop_tx(struct mt76_dev *dev) cancel_work_sync(&dev->usb.stat_work); clear_bit(MT76_READING_STATS, &dev->phy.state); + mt76_worker_enable(&dev->usb.status_worker); + mt76_tx_status_check(dev, NULL, true); } EXPORT_SYMBOL_GPL(mt76u_stop_tx); @@ -1103,15 +1126,13 @@ int mt76u_init(struct mt76_dev *dev, }; struct usb_device *udev = interface_to_usbdev(intf); struct mt76_usb *usb = &dev->usb; - int err = -ENOMEM; + int err; mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr; mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr; mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw; mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy; - dev->tx_worker.fn = mt76u_tx_worker; - tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); INIT_WORK(&usb->stat_work, mt76u_tx_status_data); usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1); @@ -1120,7 +1141,7 @@ int mt76u_init(struct mt76_dev *dev, usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL); if (!usb->data) - goto error; + return -ENOMEM; mutex_init(&usb->usb_ctrl_mtx); dev->bus = &mt76u_ops; @@ -1132,14 +1153,22 @@ int mt76u_init(struct mt76_dev *dev, err = mt76u_set_endpoints(intf, usb); if (err < 0) - goto error; + return err; - return 0; + err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker, + "usb-rx"); + if (err) + return err; + + err = mt76_worker_setup(dev->hw, &usb->status_worker, + mt76u_status_worker, "usb-status"); + if (err) + return err; -error: - destroy_workqueue(dev->wq); + sched_set_fifo_low(usb->rx_worker.task); + sched_set_fifo_low(usb->status_worker.task); - return err; + return 0; } EXPORT_SYMBOL_GPL(mt76u_init); diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 09f931d4598c..5f99054f535b 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -212,9 +212,9 @@ out: spin_unlock_irqrestore(&dev->rx_lock, flags); } -static void mt7601u_rx_tasklet(unsigned long data) +static void mt7601u_rx_tasklet(struct tasklet_struct *t) { - struct mt7601u_dev *dev = (struct mt7601u_dev *) data; + struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet); struct mt7601u_dma_buf_rx *e; while ((e = mt7601u_rx_get_pending_entry(dev))) { @@ -266,9 +266,9 @@ out: spin_unlock_irqrestore(&dev->tx_lock, flags); } -static void mt7601u_tx_tasklet(unsigned long data) +static void mt7601u_tx_tasklet(struct tasklet_struct *t) { - struct mt7601u_dev *dev = (struct mt7601u_dev *) data; + struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet); struct sk_buff_head skbs; unsigned long flags; @@ -507,8 +507,8 @@ int mt7601u_dma_init(struct mt7601u_dev *dev) { int ret = -ENOMEM; - tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev); - tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev); + tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet); + tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet); ret = mt7601u_alloc_tx(dev); if (ret) diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index c1ac1d84790f..e3dd205cbbe5 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1709,7 +1709,7 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, { struct wilc *wl; struct wilc_vif *vif; 
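/* [Editor's note] The mt7601u hunks above are part of the tree-wide tasklet
 * API conversion: tasklet_init() plus an (unsigned long) cast becomes
 * tasklet_setup(), and the callback recovers its owning structure with
 * from_tasklet(), a type-safe container_of() wrapper. A sketch of the
 * converted shape; struct my_dev and both functions are hypothetical.
 */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
	/* ... driver state ... */
};

static void my_rx_tasklet(struct tasklet_struct *t)
{
	/* replaces the old "(struct my_dev *)data" cast */
	struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);

	(void)dev;	/* process pending rx for dev here */
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->rx_tasklet, my_rx_tasklet);
}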
- int ret; + int ret, i; wl = wilc_create_wiphy(dev); if (!wl) @@ -1725,7 +1725,10 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, wl->io_type = io_type; wl->hif_func = ops; wl->chip_ps_state = WILC_CHIP_WAKEDUP; - INIT_LIST_HEAD(&wl->txq_head.list); + + for (i = 0; i < NQUEUES; i++) + INIT_LIST_HEAD(&wl->txq[i].txq_head.list); + INIT_LIST_HEAD(&wl->rxq_head.list); INIT_LIST_HEAD(&wl->vif_list); diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index d025a3093015..a133736a7821 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1276,6 +1276,23 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr) return result; } +int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr) +{ + struct wid wid; + int result; + + wid.id = WID_MAC_ADDR; + wid.type = WID_STR; + wid.size = ETH_ALEN; + wid.val = mac_addr; + + result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); + if (result) + netdev_err(vif->ndev, "Failed to set mac address\n"); + + return result; +} + int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies, size_t ies_len) { diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h index db9179171f05..58811911213b 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.h +++ b/drivers/net/wireless/microchip/wilc1000/hif.h @@ -168,6 +168,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, u8 cipher_mode); int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid); int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr); +int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr); int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies, size_t ies_len); int wilc_disconnect(struct wilc_vif *vif); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 20615c7ec168..2a1fbbdd6a4b 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -628,6 +628,43 @@ static struct net_device_stats *mac_stats(struct net_device *dev) return &vif->netstats; } +static int wilc_set_mac_addr(struct net_device *dev, void *p) +{ + int result; + struct wilc_vif *vif = netdev_priv(dev); + struct wilc *wilc = vif->wilc; + struct sockaddr *addr = (struct sockaddr *)p; + unsigned char mac_addr[ETH_ALEN]; + struct wilc_vif *tmp_vif; + int srcu_idx; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + srcu_idx = srcu_read_lock(&wilc->srcu); + list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) { + wilc_get_mac_address(tmp_vif, mac_addr); + if (ether_addr_equal(addr->sa_data, mac_addr)) { + if (vif != tmp_vif) { + srcu_read_unlock(&wilc->srcu, srcu_idx); + return -EINVAL; + } + srcu_read_unlock(&wilc->srcu, srcu_idx); + return 0; + } + } + srcu_read_unlock(&wilc->srcu, srcu_idx); + + result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); + if (result) + return result; + + ether_addr_copy(vif->bssid, addr->sa_data); + ether_addr_copy(vif->ndev->dev_addr, addr->sa_data); + + return result; +} + static void wilc_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -813,6 +850,7 @@ static const struct net_device_ops wilc_netdev_ops = { .ndo_init = mac_init_fn, .ndo_open = wilc_mac_open, .ndo_stop = wilc_mac_close, + .ndo_set_mac_address = wilc_set_mac_addr, .ndo_start_xmit = 
wilc_mac_xmit, .ndo_get_stats = mac_stats, .ndo_set_rx_mode = wilc_set_multicast_list, diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index d0a006b68d08..86209b391a3d 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -197,6 +197,14 @@ struct wilc_vif { struct cfg80211_bss *bss; }; +struct wilc_tx_queue_status { + u8 buffer[AC_BUFFER_SIZE]; + u16 end_index; + u16 cnt[NQUEUES]; + u16 sum; + bool initialized; +}; + struct wilc { struct wiphy *wiphy; const struct wilc_hif_func *hif_func; @@ -245,9 +253,10 @@ struct wilc { u32 rx_buffer_offset; u8 *tx_buffer; - struct txq_entry_t txq_head; + struct txq_handle txq[NQUEUES]; int txq_entries; + struct wilc_tx_queue_status tx_q_limit; struct rxq_entry_t rxq_head; const struct firmware *firmware; diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index a18dac0aa6b6..be732929322c 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -35,6 +35,7 @@ static const struct wilc_hif_func wilc_hif_spi; #define CMD_SINGLE_READ 0xca #define CMD_RESET 0xcf +#define SPI_ENABLE_VMM_RETRY_LIMIT 2 #define DATA_PKT_SZ_256 256 #define DATA_PKT_SZ_512 512 #define DATA_PKT_SZ_1K 1024 @@ -856,8 +857,26 @@ static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status) static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val) { - return spi_internal_write(wilc, WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, - val); + int ret; + int retry = SPI_ENABLE_VMM_RETRY_LIMIT; + u32 check; + + while (retry) { + ret = spi_internal_write(wilc, + WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, + val); + if (ret) + break; + + ret = spi_internal_read(wilc, + WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, + &check); + if (ret || ((check & EN_VMM) == (val & EN_VMM))) + break; + + retry--; + } + return ret; } static int wilc_spi_sync_ext(struct wilc *wilc, int nint) diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index 6a82fb2f283e..c12f27be9f79 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -6,6 +6,7 @@ #include <linux/if_ether.h> #include <linux/ip.h> +#include <net/dsfield.h> #include "cfg80211.h" #include "wlan_cfg.h" @@ -28,33 +29,34 @@ static inline void release_bus(struct wilc *wilc, enum bus_release release) mutex_unlock(&wilc->hif_cs); } -static void wilc_wlan_txq_remove(struct wilc *wilc, struct txq_entry_t *tqe) +static void wilc_wlan_txq_remove(struct wilc *wilc, u8 q_num, + struct txq_entry_t *tqe) { list_del(&tqe->list); wilc->txq_entries -= 1; + wilc->txq[q_num].count--; } static struct txq_entry_t * -wilc_wlan_txq_remove_from_head(struct net_device *dev) +wilc_wlan_txq_remove_from_head(struct wilc *wilc, u8 q_num) { struct txq_entry_t *tqe = NULL; unsigned long flags; - struct wilc_vif *vif = netdev_priv(dev); - struct wilc *wilc = vif->wilc; spin_lock_irqsave(&wilc->txq_spinlock, flags); - if (!list_empty(&wilc->txq_head.list)) { - tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t, - list); + if (!list_empty(&wilc->txq[q_num].txq_head.list)) { + tqe = list_first_entry(&wilc->txq[q_num].txq_head.list, + struct txq_entry_t, list); list_del(&tqe->list); wilc->txq_entries -= 1; + wilc->txq[q_num].count--; } spin_unlock_irqrestore(&wilc->txq_spinlock, flags); return tqe; } -static void 
wilc_wlan_txq_add_to_tail(struct net_device *dev, +static void wilc_wlan_txq_add_to_tail(struct net_device *dev, u8 q_num, struct txq_entry_t *tqe) { unsigned long flags; @@ -63,15 +65,16 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev, spin_lock_irqsave(&wilc->txq_spinlock, flags); - list_add_tail(&tqe->list, &wilc->txq_head.list); + list_add_tail(&tqe->list, &wilc->txq[q_num].txq_head.list); wilc->txq_entries += 1; + wilc->txq[q_num].count++; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); complete(&wilc->txq_event); } -static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif, +static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif, u8 q_num, struct txq_entry_t *tqe) { unsigned long flags; @@ -81,8 +84,9 @@ static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif, spin_lock_irqsave(&wilc->txq_spinlock, flags); - list_add(&tqe->list, &wilc->txq_head.list); + list_add(&tqe->list, &wilc->txq[q_num].txq_head.list); wilc->txq_entries += 1; + wilc->txq[q_num].count++; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); mutex_unlock(&wilc->txq_add_to_head_cs); @@ -212,7 +216,7 @@ static void wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev) tqe = f->pending_acks[i].txqe; if (tqe) { - wilc_wlan_txq_remove(wilc, tqe); + wilc_wlan_txq_remove(wilc, tqe->q_num, tqe); tqe->status = 1; if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, @@ -258,18 +262,148 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer, } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); - if (!tqe) + if (!tqe) { + complete(&wilc->cfg_event); return 0; + } tqe->type = WILC_CFG_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; tqe->tx_complete_func = NULL; tqe->priv = NULL; + tqe->q_num = AC_VO_Q; tqe->ack_idx = NOT_TCP_ACK; tqe->vif = vif; - wilc_wlan_txq_add_to_head(vif, tqe); + wilc_wlan_txq_add_to_head(vif, AC_VO_Q, tqe); + + return 1; +} + +static bool is_ac_q_limit(struct wilc *wl, u8 q_num) +{ + u8 factors[NQUEUES] = {1, 1, 1, 1}; + u16 i; + unsigned long flags; + struct wilc_tx_queue_status *q = &wl->tx_q_limit; + u8 end_index; + u8 q_limit; + bool ret = false; + + spin_lock_irqsave(&wl->txq_spinlock, flags); + if (!q->initialized) { + for (i = 0; i < AC_BUFFER_SIZE; i++) + q->buffer[i] = i % NQUEUES; + + for (i = 0; i < NQUEUES; i++) { + q->cnt[i] = AC_BUFFER_SIZE * factors[i] / NQUEUES; + q->sum += q->cnt[i]; + } + q->end_index = AC_BUFFER_SIZE - 1; + q->initialized = 1; + } + + end_index = q->end_index; + q->cnt[q->buffer[end_index]] -= factors[q->buffer[end_index]]; + q->cnt[q_num] += factors[q_num]; + q->sum += (factors[q_num] - factors[q->buffer[end_index]]); + + q->buffer[end_index] = q_num; + if (end_index > 0) + q->end_index--; + else + q->end_index = AC_BUFFER_SIZE - 1; + + if (!q->sum) + q_limit = 1; + else + q_limit = (q->cnt[q_num] * FLOW_CONTROL_UPPER_THRESHOLD / q->sum) + 1; + + if (wl->txq[q_num].count <= q_limit) + ret = true; + + spin_unlock_irqrestore(&wl->txq_spinlock, flags); + + return ret; +} + +static inline u8 ac_classify(struct wilc *wilc, struct sk_buff *skb) +{ + u8 q_num = AC_BE_Q; + u8 dscp; + + switch (skb->protocol) { + case htons(ETH_P_IP): + dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; + break; + case htons(ETH_P_IPV6): + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; + break; + default: + return q_num; + } + + switch (dscp) { + case 0x08: + case 0x20: + case 0x40: + q_num = AC_BK_Q; + break; + case 0x80: + case 0xA0: + case 0x28: + q_num = AC_VI_Q; + break; + case 0xC0: + case 0xD0: + case 0xE0: + case 0x88: + case 0xB8: + 
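/* [Editor's note] ac_classify() maps the DSCP field of outgoing IPv4/IPv6
 * packets onto the four 802.11e access categories. The same mapping as a
 * standalone helper, for tracing the switch at this point; the values
 * mirror the case labels, and everything unlisted stays best effort.
 */
#include <linux/types.h>

enum { Q_VO, Q_VI, Q_BE, Q_BK };	/* mirrors the AC_*_Q ordering */

static u8 dscp_to_ac(u8 dsfield)
{
	switch (dsfield & 0xfc) {	/* drop the two ECN bits */
	case 0x08: case 0x20: case 0x40:
		return Q_BK;		/* background */
	case 0x80: case 0xa0: case 0x28:
		return Q_VI;		/* video */
	case 0xc0: case 0xd0: case 0xe0: case 0x88: case 0xb8:
		return Q_VO;		/* voice */
	default:
		return Q_BE;		/* best effort */
	}
}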
q_num = AC_VO_Q; + break; + } + + return q_num; +} + +static inline int ac_balance(struct wilc *wl, u8 *ratio) +{ + u8 i, max_count = 0; + + if (!ratio) + return -EINVAL; + + for (i = 0; i < NQUEUES; i++) + if (wl->txq[i].fw.count > max_count) + max_count = wl->txq[i].fw.count; + + for (i = 0; i < NQUEUES; i++) + ratio[i] = max_count - wl->txq[i].fw.count; + + return 0; +} + +static inline void ac_update_fw_ac_pkt_info(struct wilc *wl, u32 reg) +{ + wl->txq[AC_BK_Q].fw.count = FIELD_GET(BK_AC_COUNT_FIELD, reg); + wl->txq[AC_BE_Q].fw.count = FIELD_GET(BE_AC_COUNT_FIELD, reg); + wl->txq[AC_VI_Q].fw.count = FIELD_GET(VI_AC_COUNT_FIELD, reg); + wl->txq[AC_VO_Q].fw.count = FIELD_GET(VO_AC_COUNT_FIELD, reg); + + wl->txq[AC_BK_Q].fw.acm = FIELD_GET(BK_AC_ACM_STAT_FIELD, reg); + wl->txq[AC_BE_Q].fw.acm = FIELD_GET(BE_AC_ACM_STAT_FIELD, reg); + wl->txq[AC_VI_Q].fw.acm = FIELD_GET(VI_AC_ACM_STAT_FIELD, reg); + wl->txq[AC_VO_Q].fw.acm = FIELD_GET(VO_AC_ACM_STAT_FIELD, reg); +} + +static inline u8 ac_change(struct wilc *wilc, u8 *ac) +{ + do { + if (wilc->txq[*ac].fw.acm == 0) + return 0; + (*ac)++; + } while (*ac < NQUEUES); return 1; } @@ -281,16 +415,21 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer, struct txq_entry_t *tqe; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc; + u8 q_num; wilc = vif->wilc; - if (wilc->quit) + if (wilc->quit) { + tx_complete_fn(priv, 0); return 0; + } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); - if (!tqe) + if (!tqe) { + tx_complete_fn(priv, 0); return 0; + } tqe->type = WILC_NET_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; @@ -298,10 +437,24 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer, tqe->priv = priv; tqe->vif = vif; - tqe->ack_idx = NOT_TCP_ACK; - if (vif->ack_filter.enabled) - tcp_process(dev, tqe); - wilc_wlan_txq_add_to_tail(dev, tqe); + q_num = ac_classify(wilc, priv); + tqe->q_num = q_num; + if (ac_change(wilc, &q_num)) { + tx_complete_fn(priv, 0); + kfree(tqe); + return 0; + } + + if (is_ac_q_limit(wilc, q_num)) { + tqe->ack_idx = NOT_TCP_ACK; + if (vif->ack_filter.enabled) + tcp_process(dev, tqe); + wilc_wlan_txq_add_to_tail(dev, q_num, tqe); + } else { + tx_complete_fn(priv, 0); + kfree(tqe); + } + return wilc->txq_entries; } @@ -315,34 +468,39 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, wilc = vif->wilc; - if (wilc->quit) + if (wilc->quit) { + tx_complete_fn(priv, 0); return 0; + } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); - if (!tqe) + if (!tqe) { + tx_complete_fn(priv, 0); return 0; + } tqe->type = WILC_MGMT_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; tqe->tx_complete_func = tx_complete_fn; tqe->priv = priv; + tqe->q_num = AC_BE_Q; tqe->ack_idx = NOT_TCP_ACK; tqe->vif = vif; - wilc_wlan_txq_add_to_tail(dev, tqe); + wilc_wlan_txq_add_to_tail(dev, AC_VO_Q, tqe); return 1; } -static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc) +static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc, u8 q_num) { struct txq_entry_t *tqe = NULL; unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); - if (!list_empty(&wilc->txq_head.list)) - tqe = list_first_entry(&wilc->txq_head.list, struct txq_entry_t, - list); + if (!list_empty(&wilc->txq[q_num].txq_head.list)) + tqe = list_first_entry(&wilc->txq[q_num].txq_head.list, + struct txq_entry_t, list); spin_unlock_irqrestore(&wilc->txq_spinlock, flags); @@ -350,13 +508,14 @@ static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc) 
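/* [Editor's note] is_ac_q_limit() above derives a per-AC admission quota
 * from a sliding window over the last AC_BUFFER_SIZE queued packets: the
 * oldest sample is evicted, the new AC is recorded, and a packet is only
 * accepted while its queue depth stays under that AC's share of the window
 * scaled by FLOW_CONTROL_UPPER_THRESHOLD. A reduced sketch with the same
 * equal per-AC weights; it assumes the ring was pre-seeded round-robin, as
 * the driver's lazy initialization does, so ring occupancy stays at WINDOW.
 */
#include <linux/types.h>

#define WINDOW	1000	/* mirrors AC_BUFFER_SIZE */

struct ac_window {
	u8  slot[WINDOW];	/* ring of the ACs queued most recently */
	u16 cnt[4];		/* per-AC occupancy of the ring */
	u16 tail;
};

/* record one packet of class @ac and return that class's backlog quota */
static u16 ac_quota(struct ac_window *w, u8 ac, u16 upper_thresh)
{
	w->cnt[w->slot[w->tail]]--;		/* evict the oldest sample */
	w->cnt[ac]++;				/* account the new one */
	w->slot[w->tail] = ac;
	w->tail = w->tail ? w->tail - 1 : WINDOW - 1;

	/* a busier class earns a proportionally deeper allowed backlog */
	return w->cnt[ac] * upper_thresh / WINDOW + 1;
}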
} static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc, - struct txq_entry_t *tqe) + struct txq_entry_t *tqe, + u8 q_num) { unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); - if (!list_is_last(&tqe->list, &wilc->txq_head.list)) + if (!list_is_last(&tqe->list, &wilc->txq[q_num].txq_head.list)) tqe = list_next_entry(tqe, list); else tqe = NULL; @@ -479,54 +638,91 @@ EXPORT_SYMBOL_GPL(host_sleep_notify); int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) { int i, entries = 0; + u8 k, ac; u32 sum; u32 reg; + u8 ac_desired_ratio[NQUEUES] = {0, 0, 0, 0}; + u8 ac_preserve_ratio[NQUEUES] = {1, 1, 1, 1}; + u8 *num_pkts_to_add; + u8 vmm_entries_ac[WILC_VMM_TBL_SIZE]; u32 offset = 0; + bool max_size_over = 0, ac_exist = 0; int vmm_sz = 0; - struct txq_entry_t *tqe; + struct txq_entry_t *tqe_q[NQUEUES]; int ret = 0; int counter; int timeout; u32 vmm_table[WILC_VMM_TBL_SIZE]; + u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; const struct wilc_hif_func *func; + int srcu_idx; u8 *txb = wilc->tx_buffer; - struct net_device *dev; struct wilc_vif *vif; if (wilc->quit) goto out_update_cnt; + if (ac_balance(wilc, ac_desired_ratio)) + return -EINVAL; + mutex_lock(&wilc->txq_add_to_head_cs); - tqe = wilc_wlan_txq_get_first(wilc); - if (!tqe) - goto out_unlock; - dev = tqe->vif->ndev; - wilc_wlan_txq_filter_dup_tcp_ack(dev); + + srcu_idx = srcu_read_lock(&wilc->srcu); + list_for_each_entry_rcu(vif, &wilc->vif_list, list) + wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev); + srcu_read_unlock(&wilc->srcu, srcu_idx); + + for (ac = 0; ac < NQUEUES; ac++) + tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac); + i = 0; sum = 0; - while (tqe && (i < (WILC_VMM_TBL_SIZE - 1))) { - if (tqe->type == WILC_CFG_PKT) - vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET; - else if (tqe->type == WILC_NET_PKT) - vmm_sz = ETH_ETHERNET_HDR_OFFSET; - else - vmm_sz = HOST_HDR_OFFSET; - - vmm_sz += tqe->buffer_size; - vmm_sz = ALIGN(vmm_sz, 4); - - if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE) - break; + max_size_over = 0; + num_pkts_to_add = ac_desired_ratio; + do { + ac_exist = 0; + for (ac = 0; (ac < NQUEUES) && (!max_size_over); ac++) { + if (!tqe_q[ac]) + continue; + + ac_exist = 1; + for (k = 0; (k < num_pkts_to_add[ac]) && + (!max_size_over) && tqe_q[ac]; k++) { + if (i >= (WILC_VMM_TBL_SIZE - 1)) { + max_size_over = 1; + break; + } - vmm_table[i] = vmm_sz / 4; - if (tqe->type == WILC_CFG_PKT) - vmm_table[i] |= BIT(10); - cpu_to_le32s(&vmm_table[i]); + if (tqe_q[ac]->type == WILC_CFG_PKT) + vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET; + else if (tqe_q[ac]->type == WILC_NET_PKT) + vmm_sz = ETH_ETHERNET_HDR_OFFSET; + else + vmm_sz = HOST_HDR_OFFSET; - i++; - sum += vmm_sz; - tqe = wilc_wlan_txq_get_next(wilc, tqe); - } + vmm_sz += tqe_q[ac]->buffer_size; + vmm_sz = ALIGN(vmm_sz, 4); + + if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE) { + max_size_over = 1; + break; + } + vmm_table[i] = vmm_sz / 4; + if (tqe_q[ac]->type == WILC_CFG_PKT) + vmm_table[i] |= BIT(10); + + cpu_to_le32s(&vmm_table[i]); + vmm_entries_ac[i] = ac; + + i++; + sum += vmm_sz; + tqe_q[ac] = wilc_wlan_txq_get_next(wilc, + tqe_q[ac], + ac); + } + } + num_pkts_to_add = ac_preserve_ratio; + } while (!max_size_over && ac_exist); if (i == 0) goto out_unlock; @@ -540,8 +736,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) if (ret) break; - if ((reg & 0x1) == 0) + if ((reg & 0x1) == 0) { + ac_update_fw_ac_pkt_info(wilc, reg); break; + } counter++; if (counter > 200) { @@ -610,11 +808,13 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) offset = 0; 
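/* [Editor's note] Each vmm_table[] word built above describes one pending
 * packet to the firmware: the 4-byte-aligned buffer length in 32-bit words
 * in the low bits, BIT(10) marking a configuration packet, stored
 * little-endian. The encoding in isolation, with the field layout exactly
 * as used in the loop above:
 */
#include <linux/bits.h>
#include <linux/types.h>
#include <asm/byteorder.h>

static __le32 vmm_entry(u32 aligned_len, bool cfg_pkt)
{
	u32 v = aligned_len / 4;	/* length in 32-bit words */

	if (cfg_pkt)
		v |= BIT(10);		/* firmware treats the entry as config */

	return cpu_to_le32(v);		/* table is consumed little-endian */
}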
i = 0; do { + struct txq_entry_t *tqe; u32 header, buffer_offset; char *bssid; u8 mgmt_ptk = 0; - tqe = wilc_wlan_txq_remove_from_head(dev); + tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]); + ac_pkt_num_to_chip[vmm_entries_ac[i]]++; if (!tqe) break; @@ -639,8 +839,11 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) if (tqe->type == WILC_CFG_PKT) { buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET; } else if (tqe->type == WILC_NET_PKT) { + int prio = tqe->q_num; + bssid = tqe->vif->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; + memcpy(&txb[offset + 4], &prio, sizeof(prio)); memcpy(&txb[offset + 8], bssid, 6); } else { buffer_offset = HOST_HDR_OFFSET; @@ -658,6 +861,8 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) vif->ack_filter.pending_acks[tqe->ack_idx].txqe = NULL; kfree(tqe); } while (--entries); + for (i = 0; i < NQUEUES; i++) + wilc->txq[i].fw.count += ac_pkt_num_to_chip[i]; acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); @@ -956,14 +1161,17 @@ void wilc_wlan_cleanup(struct net_device *dev) { struct txq_entry_t *tqe; struct rxq_entry_t *rqe; + u8 ac; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; wilc->quit = 1; - while ((tqe = wilc_wlan_txq_remove_from_head(dev))) { - if (tqe->tx_complete_func) - tqe->tx_complete_func(tqe->priv, 0); - kfree(tqe); + for (ac = 0; ac < NQUEUES; ac++) { + while ((tqe = wilc_wlan_txq_remove_from_head(wilc, ac))) { + if (tqe->tx_complete_func) + tqe->tx_complete_func(tqe->priv, 0); + kfree(tqe); + } } while ((rqe = wilc_wlan_rxq_remove(wilc))) diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h index 7689569cd82f..3d2104f19819 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -207,6 +207,18 @@ #define MODALIAS "WILC_SPI" +#define NQUEUES 4 +#define AC_BUFFER_SIZE 1000 + +#define VO_AC_COUNT_FIELD GENMASK(31, 25) +#define VO_AC_ACM_STAT_FIELD BIT(24) +#define VI_AC_COUNT_FIELD GENMASK(23, 17) +#define VI_AC_ACM_STAT_FIELD BIT(16) +#define BE_AC_COUNT_FIELD GENMASK(15, 9) +#define BE_AC_ACM_STAT_FIELD BIT(8) +#define BK_AC_COUNT_FIELD GENMASK(7, 3) +#define BK_AC_ACM_STAT_FIELD BIT(1) + #define WILC_PKT_HDR_CONFIG_FIELD BIT(31) #define WILC_PKT_HDR_OFFSET_FIELD GENMASK(30, 22) #define WILC_PKT_HDR_TOTAL_LEN_FIELD GENMASK(21, 11) @@ -295,10 +307,17 @@ * Tx/Rx Queue Structure * ********************************************/ +enum ip_pkt_priority { + AC_VO_Q = 0, + AC_VI_Q = 1, + AC_BE_Q = 2, + AC_BK_Q = 3 +}; struct txq_entry_t { struct list_head list; int type; + u8 q_num; int ack_idx; u8 *buffer; int buffer_size; @@ -308,6 +327,17 @@ struct txq_entry_t { void (*tx_complete_func)(void *priv, int status); }; +struct txq_fw_recv_queue_stat { + u8 acm; + u8 count; +}; + +struct txq_handle { + struct txq_entry_t txq_head; + u16 count; + struct txq_fw_recv_queue_stat fw; +}; + struct rxq_entry_t { struct list_head list; u8 *buffer; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index bf6dbeb61842..ad726bd100ec 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -126,28 +126,13 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (unlikely(skb->protocol == htons(ETH_P_PAE))) { qtnf_packet_send_hi_pri(skb); - qtnf_update_tx_stats(ndev, skb); + dev_sw_netstats_tx_add(ndev, 1, skb->len); return NETDEV_TX_OK; } return 
qtnf_bus_data_tx(mac->bus, skb, mac->macid, vif->vifid); } -/* Netdev handler for getting stats. - */ -static void qtnf_netdev_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) -{ - struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); - - netdev_stats_to_stats64(stats, &ndev->stats); - - if (!vif->stats64) - return; - - dev_fetch_sw_netstats(stats, vif->stats64); -} - /* Netdev handler for transmission timeout. */ static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue) @@ -211,13 +196,27 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev, return 0; } +static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev) +{ + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + + return dev->tstats ? 0 : -ENOMEM; +} + +static void qtnf_netdev_free_pcpu_stats(struct net_device *dev) +{ + free_percpu(dev->tstats); +} + /* Network device ops handlers */ const struct net_device_ops qtnf_netdev_ops = { + .ndo_init = qtnf_netdev_alloc_pcpu_stats, + .ndo_uninit = qtnf_netdev_free_pcpu_stats, .ndo_open = qtnf_netdev_open, .ndo_stop = qtnf_netdev_close, .ndo_start_xmit = qtnf_netdev_hard_start_xmit, .ndo_tx_timeout = qtnf_netdev_tx_timeout, - .ndo_get_stats64 = qtnf_netdev_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = qtnf_netdev_set_mac_address, .ndo_get_port_parent_id = qtnf_netdev_port_parent_id, }; @@ -448,10 +447,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, qtnf_sta_list_init(&vif->sta_list); INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri); skb_queue_head_init(&vif->high_pri_tx_queue); - vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!vif->stats64) - pr_warn("VIF%u.%u: per cpu stats allocation failed\n", - macid, i); } qtnf_mac_init_primary_intf(mac); @@ -531,7 +526,6 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) } rtnl_unlock(); qtnf_sta_list_free(&vif->sta_list); - free_percpu(vif->stats64); } if (mac->wiphy_registered) @@ -924,46 +918,6 @@ void qtnf_wake_all_queues(struct net_device *ndev) } EXPORT_SYMBOL_GPL(qtnf_wake_all_queues); -void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb) -{ - struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); - struct pcpu_sw_netstats *stats64; - - if (unlikely(!vif || !vif->stats64)) { - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += skb->len; - return; - } - - stats64 = this_cpu_ptr(vif->stats64); - - u64_stats_update_begin(&stats64->syncp); - stats64->rx_packets++; - stats64->rx_bytes += skb->len; - u64_stats_update_end(&stats64->syncp); -} -EXPORT_SYMBOL_GPL(qtnf_update_rx_stats); - -void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb) -{ - struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); - struct pcpu_sw_netstats *stats64; - - if (unlikely(!vif || !vif->stats64)) { - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; - return; - } - - stats64 = this_cpu_ptr(vif->stats64); - - u64_stats_update_begin(&stats64->syncp); - stats64->tx_packets++; - stats64->tx_bytes += skb->len; - u64_stats_update_end(&stats64->syncp); -} -EXPORT_SYMBOL_GPL(qtnf_update_tx_stats); - struct dentry *qtnf_get_debugfs_dir(void) { return qtnf_debugfs_dir; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 269ce12cf8bf..b204a24074ab 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -70,8 +70,6 @@ struct 
qtnf_vif { struct qtnf_sta_list sta_list; unsigned long cons_tx_timeout_cnt; int generation; - - struct pcpu_sw_netstats __percpu *stats64; }; struct qtnf_mac_info { @@ -139,8 +137,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed); struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid); struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb); void qtnf_wake_all_queues(struct net_device *ndev); -void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb); -void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb); void qtnf_virtual_intf_cleanup(struct net_device *ndev); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 5337e67092ca..0f328ce47fee 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -299,19 +299,19 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR); if (IS_ERR(sysctl_bar)) { pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); - return ret; + return PTR_ERR(sysctl_bar); } dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR); if (IS_ERR(dmareg_bar)) { pr_err("failed to map BAR%u\n", QTN_DMA_BAR); - return ret; + return PTR_ERR(dmareg_bar); } epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR); if (IS_ERR(epmem_bar)) { pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); - return ret; + return PTR_ERR(epmem_bar); } chipid = qtnf_chip_id_get(sysctl_bar); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 9a20c0f29078..0003df577cb3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -489,7 +489,7 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps) PCI_DMA_TODEVICE); if (skb->dev) { - qtnf_update_tx_stats(skb->dev, skb); + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); if (unlikely(priv->tx_stopped)) { qtnf_wake_all_queues(skb->dev); priv->tx_stopped = 0; @@ -756,7 +756,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget) skb_put(skb, psize); ndev = qtnf_classify_skb(bus, skb); if (likely(ndev)) { - qtnf_update_rx_stats(ndev, skb); + dev_sw_netstats_rx_add(ndev, skb->len); skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(napi, skb); } else { diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index 4b87d3151017..24f1be8ddcef 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -418,7 +418,7 @@ static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts) PCI_DMA_TODEVICE); if (skb->dev) { - qtnf_update_tx_stats(skb->dev, skb); + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); if (unlikely(priv->tx_stopped)) { qtnf_wake_all_queues(skb->dev); priv->tx_stopped = 0; @@ -662,7 +662,7 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget) skb_put(skb, psize); ndev = qtnf_classify_skb(bus, skb); if (likely(ndev)) { - qtnf_update_rx_stats(ndev, skb); + dev_sw_netstats_rx_add(ndev, skb->len); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); } else { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 
fed6d21cd6ce..5264b0a1f098 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1228,6 +1228,17 @@ static int rt2800_check_hung(struct data_queue *queue) return queue->wd_count > 16; } +static void rt2800_update_survey(struct rt2x00_dev *rt2x00dev) +{ + struct ieee80211_channel *chan = rt2x00dev->hw->conf.chandef.chan; + struct rt2x00_chan_survey *chan_survey = + &rt2x00dev->chan_survey[chan->hw_value]; + + chan_survey->time_idle += rt2800_register_read(rt2x00dev, CH_IDLE_STA); + chan_survey->time_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA); + chan_survey->time_ext_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC); +} + void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; @@ -1237,6 +1248,8 @@ void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; + rt2800_update_survey(rt2x00dev); + queue_for_each(rt2x00dev, queue) { switch (queue->qid) { case QID_AC_VO: @@ -5553,6 +5566,12 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev, rt2800_config_lna_gain(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_CHANNEL) { + /* + * To provide correct survey data for survey-based ACS algorithm + * we have to save survey data for current channel before switching. + */ + rt2800_update_survey(rt2x00dev); + rt2800_config_channel(rt2x00dev, libconf->conf, &libconf->rf, &libconf->channel); rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan, @@ -10111,12 +10130,20 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) } /* - * Create channel information array + * Create channel information and survey arrays */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; + rt2x00dev->chan_survey = + kcalloc(spec->num_channels, sizeof(struct rt2x00_chan_survey), + GFP_KERNEL); + if (!rt2x00dev->chan_survey) { + kfree(info); + return -ENOMEM; + } + spec->channels_info = info; default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); @@ -10503,27 +10530,30 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct rt2x00_dev *rt2x00dev = hw->priv; - struct ieee80211_conf *conf = &hw->conf; - u32 idle, busy, busy_ext; + struct rt2x00_chan_survey *chan_survey = + &rt2x00dev->chan_survey[idx]; + enum nl80211_band band = NL80211_BAND_2GHZ; - if (idx != 0) + if (idx >= rt2x00dev->bands[band].n_channels) { + idx -= rt2x00dev->bands[band].n_channels; + band = NL80211_BAND_5GHZ; + } + + if (idx >= rt2x00dev->bands[band].n_channels) return -ENOENT; - survey->channel = conf->chandef.chan; + if (idx == 0) + rt2800_update_survey(rt2x00dev); - idle = rt2800_register_read(rt2x00dev, CH_IDLE_STA); - busy = rt2800_register_read(rt2x00dev, CH_BUSY_STA); - busy_ext = rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC); + survey->channel = &rt2x00dev->bands[band].channels[idx]; - if (idle || busy) { - survey->filled = SURVEY_INFO_TIME | - SURVEY_INFO_TIME_BUSY | - SURVEY_INFO_TIME_EXT_BUSY; + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_EXT_BUSY; - survey->time = (idle + busy) / 1000; - survey->time_busy = busy / 1000; - survey->time_ext_busy = busy_ext / 1000; - } + survey->time = div_u64(chan_survey->time_idle + chan_survey->time_busy, 1000); + survey->time_busy = div_u64(chan_survey->time_busy, 1000); + survey->time_ext_busy = div_u64(chan_survey->time_ext_busy, 1000); if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) survey->filled |= 
SURVEY_INFO_IN_USE; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 780be81863b6..9f6fc40649be 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -182,6 +182,15 @@ struct rf_channel { }; /* + * Information structure for channel survey. + */ +struct rt2x00_chan_survey { + u64 time_idle; + u64 time_busy; + u64 time_ext_busy; +}; + +/* * Channel information structure */ struct channel_info { @@ -752,6 +761,7 @@ struct rt2x00_dev { */ struct ieee80211_hw *hw; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; + struct rt2x00_chan_survey *chan_survey; enum nl80211_band curr_band; int curr_freq; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 0ee1813e8453..6bafdd991171 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c @@ -32,7 +32,6 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: - case NL80211_IFTYPE_WDS: conf.sync = TSF_SYNC_AP_NONE; break; case NL80211_IFTYPE_STATION: diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index b04f76551ca4..61a4f1ad31e2 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -194,8 +194,7 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac, if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && - vif->type != NL80211_IFTYPE_MESH_POINT && - vif->type != NL80211_IFTYPE_WDS) + vif->type != NL80211_IFTYPE_MESH_POINT) return; /* @@ -1437,9 +1436,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif -#ifdef CONFIG_WIRELESS_WDS - BIT(NL80211_IFTYPE_WDS) | -#endif BIT(NL80211_IFTYPE_AP); rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 2f68a31072ae..dea5babd30fe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -408,8 +408,7 @@ static void rt2x00mac_set_tim_iter(void *data, u8 *mac, if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && - vif->type != NL80211_IFTYPE_MESH_POINT && - vif->type != NL80211_IFTYPE_WDS) + vif->type != NL80211_IFTYPE_MESH_POINT) return; set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 3b6100e6c8f6..d4d389e8f1b4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -941,6 +941,7 @@ void rt2x00queue_unpause_queue(struct data_queue *queue) * receive frames. 
*/ queue->rt2x00dev->ops->lib->kick_queue(queue); + break; default: break; } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index bf3fbd14eda3..590bd974d94f 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -877,10 +877,10 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, switch (ccsindex = get_free_tx_ccs(local)) { case ECCSBUSY: pr_debug("ray_hw_xmit tx_ccs table busy\n"); - /* fall through */ + fallthrough; case ECCSFULL: pr_debug("ray_hw_xmit No free tx ccs\n"); - /* fall through */ + fallthrough; case ECARDGONE: netif_stop_queue(dev); return XMIT_NO_CCS; @@ -1272,7 +1272,7 @@ static int ray_set_mode(struct net_device *dev, struct iw_request_info *info, switch (wrqu->mode) { case IW_MODE_ADHOC: card_mode = 0; - /* Fall through */ + fallthrough; case IW_MODE_INFRA: local->sparm.b5.a_network_type = card_mode; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index fb57cc8b2e47..7a71f063015a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1628,17 +1628,17 @@ static void btc8723b2ant_action_wifi_link_process(struct btc_coexist static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1; u8 ap_num = 0; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset - coex_dm->switch_thres_offset; - bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); + btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); @@ -2764,10 +2764,10 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, /* SCO only or SCO+PAN(HS) */ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 bt_rssi_state; u32 wifi_bw; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); bt_rssi_state = btc8723b2ant_bt_rssi_state( btcoexist, 2, BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset, @@ -2807,12 +2807,12 @@ static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist) static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, bt_rssi_state; + u8 bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0); @@ -2852,13 +2852,13 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, 
bt_rssi_state; u32 wifi_bw; u8 ap_num = 0; u8 tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, 40, 0); bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0); @@ -2926,12 +2926,12 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist) static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - @@ -2973,12 +2973,12 @@ static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist) static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - @@ -3025,13 +3025,13 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist) /* PAN(HS) only */ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; @@ -3063,12 +3063,12 @@ static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist) /* PAN(EDR) + A2DP */ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - @@ -3118,12 +3118,12 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist) static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - @@ 
-3182,12 +3182,12 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist) /* HID + A2DP + PAN(EDR) */ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - @@ -3241,13 +3241,13 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1, bt_rssi_state; u32 wifi_bw; u8 ap_num = 0; u8 tmp = BT_8723B_2ANT_WIFI_RSSI_COEXSWITCH_THRES - coex_dm->switch_thres_offset; - wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8723b2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist, 1, 2, tmp, 0); tmp = BT_8723B_2ANT_BT_RSSI_COEXSWITCH_THRES - diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 9f5e85be9764..a18dffc8753a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1901,7 +1901,6 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) bool increase_scan_dev_num = false; bool bt_ctrl_agg_buf_size = false; u8 agg_buf_size = 5; - u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH; u32 wifi_link_status = 0; u32 num_of_wifi_link = 0; bool wifi_under_5g = false; @@ -1962,8 +1961,7 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); } else { if (wifi_connected) { - wifi_rssi_state = - btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2, + btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2, 30, 0); btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 1, 1, diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index e53789f11b08..447caa4aad32 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -1448,17 +1448,15 @@ static void btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist) static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; bool wifi_connected = false; bool low_pwr_disable = true; bool scan = false, link = false, roam = false; - wifi_rssi_state = - btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); - bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, - 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); + btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER, 
&low_pwr_disable); @@ -1516,15 +1514,14 @@ static void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist) static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state1; u8 ap_num = 0; - wifi_rssi_state = - btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); + btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES - 20, 0); - bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, - 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); + btc8821a2ant_bt_rssi_state(btcoexist, + 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num); @@ -2987,11 +2984,11 @@ static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist) /* PAN(HS) only */ static void btc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist) { - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state, bt_rssi_state; u32 wifi_bw; wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 2, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 0); @@ -3274,11 +3271,11 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist) static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist) { u32 wifi_bw; - u8 wifi_rssi_state, wifi_rssi_state1, bt_rssi_state; + u8 wifi_rssi_state, bt_rssi_state; u8 ap_num = 0; wifi_rssi_state = btc8821a2ant_wifi_rssi_state(btcoexist, 0, 2, 15, 0); - wifi_rssi_state1 = btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, + btc8821a2ant_wifi_rssi_state(btcoexist, 1, 2, BT_8821A_2ANT_WIFI_RSSI_COEXSWITCH_THRES, 0); bt_rssi_state = btc8821a2ant_bt_rssi_state(btcoexist, 3, BT_8821A_2ANT_BT_RSSI_COEXSWITCH_THRES, 37); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 2c05369b79e4..be4c0e60d44d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -47,30 +47,17 @@ static bool is_any_client_connect_to_ap(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; struct rtl_mac *mac = rtl_mac(rtlpriv); - struct rtl_sta_info *drv_priv; - u8 cnt = 0; + bool ret = false; if (mac->opmode == NL80211_IFTYPE_ADHOC || mac->opmode == NL80211_IFTYPE_MESH_POINT || mac->opmode == NL80211_IFTYPE_AP) { - if (in_interrupt() > 0) { - list_for_each_entry(drv_priv, &rtlpriv->entry_list, - list) { - cnt++; - } - } else { - spin_lock_bh(&rtlpriv->locks.entry_list_lock); - list_for_each_entry(drv_priv, &rtlpriv->entry_list, - list) { - cnt++; - } - spin_unlock_bh(&rtlpriv->locks.entry_list_lock); - } + spin_lock_bh(&rtlpriv->locks.entry_list_lock); + if (!list_empty(&rtlpriv->entry_list)) + ret = true; + spin_unlock_bh(&rtlpriv->locks.entry_list_lock); } - if (cnt > 0) - return true; - else - return false; + return ret; } static bool halbtc_legacy(struct rtl_priv *adapter) @@ -253,9 +240,6 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code, rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "btmpinfo wait req_num=%d wait=%ld\n", req_num, 
wait_ms); - if (in_interrupt()) - return false; - if (wait_for_completion_timeout(&btcoexist->bt_mp_comp, msecs_to_jiffies(wait_ms)) == 0) { rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 63f9ea21962f..bd9160b166c5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1226,7 +1226,6 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw, default: pr_err("Network type %d not support!\n", type); return 1; - break; } /* MSR_INFRA == Link in infrastructure network; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index 9be032e8ec95..12d0b3a87af7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -1348,7 +1348,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) { - u32 reg_eac, reg_e94, reg_e9c, reg_ea4; + u32 reg_eac, reg_e94, reg_e9c; u8 result = 0x00; rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c); @@ -1365,7 +1365,7 @@ static u8 _rtl88e_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); - reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); + rtl_get_bbreg(hw, 0xea4, MASKDWORD); if (!(reg_eac & BIT(28)) && (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index b9775eec4c54..c948dafa0c80 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -674,12 +674,12 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 fw_queue = QSLT_BEACON; __le32 *pdesc = (__le32 *)pdesc8; - dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, - skb->len, DMA_TO_DEVICE); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; + dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index c0635309a92d..4165175cf5c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -527,12 +527,12 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 fw_queue = QSLT_BEACON; __le32 *pdesc = (__le32 *)pdesc8; - dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, - skb->len, DMA_TO_DEVICE); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; + dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 2890a495a23e..8d2c6d8d32d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -113,7 +113,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw) /** * writeLLT - LLT table write access - * @io: io callback + * @hw: Pointer to the ieee80211_hw structure. * @address: LLT logical address. * @data: LLT data content * @@ -145,11 +145,10 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) /** * rtl92c_init_LLT_table - Init LLT table - * @io: io callback - * @boundary: + * @hw: Pointer to the ieee80211_hw structure. + * @boundary: Page boundary. * * Realtek hardware access function. - * */ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 1ad0cf37f60b..87f959d5d861 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -448,7 +448,7 @@ static void _rtl_fill_usb_tx_desc(__le32 *txdesc) set_tx_desc_first_seg(txdesc, 1); } -/** +/* * For HW recovery information */ static void _rtl_tx_desc_checksum(__le32 *txdesc) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index b3f25a228532..6cc9c7649eda 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -986,18 +986,19 @@ old_index_done: rtlpriv->dm.cck_index); } for (i = 0; i < rf; i++) { - if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) + if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) { ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1; - else if (ofdm_index[i] < ofdm_min_index) + } else if (internal_pa || + rtlhal->current_bandtype == BAND_ON_2_4G) { + if (ofdm_index[i] < ofdm_min_index_internal_pa) + ofdm_index[i] = ofdm_min_index_internal_pa; + } else if (ofdm_index[i] < ofdm_min_index) { ofdm_index[i] = ofdm_min_index; + } } if (rtlhal->current_bandtype == BAND_ON_2_4G) { if (cck_index > CCK_TABLE_SIZE - 1) { cck_index = CCK_TABLE_SIZE - 1; - } else if (internal_pa || - rtlhal->current_bandtype == BAND_ON_2_4G) { - if (ofdm_index[i] < ofdm_min_index_internal_pa) - ofdm_index[i] = ofdm_min_index_internal_pa; } else if (cck_index < 0) { cck_index = 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index e34d33e73e52..68ec009ea157 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2566,7 +2566,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t) } RTPRINT(rtlpriv, FINIT, INIT_IQK, "PHY_LCK finish delay for %d ms=2\n", timecount); - u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK); + rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK); if (index == 0 && rtlhal->interfaceindex == 0) { RTPRINT(rtlpriv, FINIT, INIT_IQK, "path-A / 5G LCK\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 8944712274b5..c02813fba934 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -664,12 +664,14 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 fw_queue = QSLT_BEACON; - dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, - skb->len, DMA_TO_DEVICE); + struct 
ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; __le32 *pdesc = (__le32 *)pdesc8; + dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index a36dc6e726d2..f8a1de6e9849 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1132,7 +1132,6 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw, default: pr_err("Network type %d not support!\n", type); return 1; - break; } /* MSR_INFRA == Link in infrastructure network; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index fa0eed434d4f..fe9b407dc2af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -1147,10 +1147,8 @@ static void _rtl8723e_phy_iq_calibrate(struct ieee80211_hw *hw, const u32 retrycount = 2; - u32 bbvalue; - if (t == 0) { - bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); + rtl_get_bbreg(hw, 0x800, MASKDWORD); rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup, 16); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index e3ee91b7ea8d..340b3d68a54e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -528,12 +528,12 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 fw_queue = QSLT_BEACON; __le32 *pdesc = (__le32 *)pdesc8; - dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, - skb->len, DMA_TO_DEVICE); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); __le16 fc = hdr->frame_control; + dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) { rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index f09f55b0468a..2b9313cb93db 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -2178,7 +2178,7 @@ static u8 _get_right_chnl_place_for_iqk(u8 chnl) static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { u8 tmpreg; - u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + u32 rf_a_mode = 0, rf_b_mode = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); tmpreg = rtl_read_byte(rtlpriv, 0xd03); @@ -2202,7 +2202,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, (rf_b_mode & 0x8FFFF) | 0x10000); } - lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); + rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0); rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 559ab78687c3..5a7cd270575a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -50,7 +50,6 @@ 
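Several tx_fill_cmddesc() hunks above (rtl8188ee, rtl8192ce, rtl8192de, rtl8723ae) move the dma_map_single() call below the reads of the 802.11 header, so the CPU no longer touches skb->data after the buffer has been handed to the device as a streaming DMA_TO_DEVICE mapping; the DMA-API contract forbids CPU accesses to such a buffer between map and unmap. A userspace model of that ordering rule, with a fake mapping that poisons the buffer to make a violation visible; all names are stand-ins:

/*
 * Read everything the CPU needs from the buffer (here, frame_control)
 * before the buffer is mapped for the device. The fake map overwrites
 * the buffer so any read-after-map would return garbage.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_hdr {
	uint16_t frame_control;
};

/* Stand-in for dma_map_single(..., DMA_TO_DEVICE). */
static uintptr_t fake_dma_map_single(void *buf, size_t len)
{
	memset(buf, 0xAA, len);	/* simulate: CPU view is now undefined */
	return (uintptr_t)buf;
}

int main(void)
{
	uint8_t skb_data[32] = { 0x80, 0x00 };	/* beacon frame_control */
	struct fake_hdr hdr;
	uintptr_t mapping;

	/* Correct order, as enforced by the hunks above: read first... */
	memcpy(&hdr, skb_data, sizeof(hdr));
	/* ...then map. */
	mapping = fake_dma_map_single(skb_data, sizeof(skb_data));

	printf("fc=0x%04x mapping=%#lx\n", hdr.frame_control,
	       (unsigned long)mapping);
	return 0;
}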
static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, pstatus->rx_mimo_signalquality[1] = -1; if (is_cck) { - u8 cck_highpwr; u8 cck_agc_rpt; cck_agc_rpt = p_phystrpt->cck_agc_rpt_ofdm_cfosho_a; @@ -59,8 +58,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, /* (2)PWDB, Average PWDB cacluated by * hardware (for rate adaptive) */ - cck_highpwr = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, - BIT(9)); + rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BIT(9)); lan_idx = ((cck_agc_rpt & 0xE0) >> 5); vga_idx = (cck_agc_rpt & 0x1f); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index f41a7643b9c4..372d6f8caf06 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -62,7 +62,7 @@ static void rtl8812ae_fixspur(struct ieee80211_hw *hw, rtl_set_bbreg(hw, RRFMOD, 0xC00, 0x2); /* 0x8AC[11:10] = 2'b10*/ - /* <20120914, Kordan> A workarould to resolve + /* <20120914, Kordan> A workaround to resolve * 2480Mhz spur by setting ADC clock as 160M. (Asked by Binson) */ if (band_width == HT_CHANNEL_WIDTH_20 && @@ -82,7 +82,7 @@ static void rtl8812ae_fixspur(struct ieee80211_hw *hw, /*0x8C4[30] = 0*/ } } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - /* <20120914, Kordan> A workarould to resolve + /* <20120914, Kordan> A workaround to resolve * 2480Mhz spur by setting ADC clock as 160M. */ if (band_width == HT_CHANNEL_WIDTH_20 && @@ -594,11 +594,10 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtlpriv); u8 current_band = rtlhal->current_bandtype; - u32 txpath, rxpath; s8 bb_diff_between_band; - txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0); - rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000); + rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0); + rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000); rtlhal->current_bandtype = (enum band_type) band; /* reconfig BB/RF according to wireless mode */ if (rtlhal->current_bandtype == BAND_ON_2_4G) { @@ -1581,7 +1580,7 @@ static void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw } /* string is in decimal */ -static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) +static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint) { u16 i = 0; *pint = 0; @@ -1599,7 +1598,7 @@ static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) return true; } -static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num) +static bool _rtl8812ae_eq_n_byte(const char *str1, const char *str2, u32 num) { if (num == 0) return false; @@ -1637,10 +1636,11 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, return channel_index; } -static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation, - u8 *pband, u8 *pbandwidth, - u8 *prate_section, u8 *prf_path, - u8 *pchannel, u8 *ppower_limit) +static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, + const char *pregulation, + const char *pband, const char *pbandwidth, + const char *prate_section, const char *prf_path, + const char *pchannel, const char *ppower_limit) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -1648,8 +1648,8 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul u8 channel_index; s8 power_limit = 0, prev_power_limit, ret; - if 
(!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) || - !_rtl8812ae_get_integer_from_string((char *)ppower_limit, + if (!_rtl8812ae_get_integer_from_string(pchannel, &channel) || + !_rtl8812ae_get_integer_from_string(ppower_limit, &power_limit)) { rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Illegal index of pwr_lmt table [chnl %d][val %d]\n", @@ -1659,42 +1659,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul power_limit = power_limit > MAX_POWER_INDEX ? MAX_POWER_INDEX : power_limit; - if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("FCC"), 3)) + if (_rtl8812ae_eq_n_byte(pregulation, "FCC", 3)) regulation = 0; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("MKK"), 3)) + else if (_rtl8812ae_eq_n_byte(pregulation, "MKK", 3)) regulation = 1; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("ETSI"), 4)) + else if (_rtl8812ae_eq_n_byte(pregulation, "ETSI", 4)) regulation = 2; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("WW13"), 4)) + else if (_rtl8812ae_eq_n_byte(pregulation, "WW13", 4)) regulation = 3; - if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("CCK"), 3)) + if (_rtl8812ae_eq_n_byte(prate_section, "CCK", 3)) rate_section = 0; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("OFDM"), 4)) + else if (_rtl8812ae_eq_n_byte(prate_section, "OFDM", 4)) rate_section = 1; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) && + _rtl8812ae_eq_n_byte(prf_path, "1T", 2)) rate_section = 2; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) && + _rtl8812ae_eq_n_byte(prf_path, "2T", 2)) rate_section = 3; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) && + _rtl8812ae_eq_n_byte(prf_path, "1T", 2)) rate_section = 4; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) && + _rtl8812ae_eq_n_byte(prf_path, "2T", 2)) rate_section = 5; - if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("20M"), 3)) + if (_rtl8812ae_eq_n_byte(pbandwidth, "20M", 3)) bandwidth = 0; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("40M"), 3)) + else if (_rtl8812ae_eq_n_byte(pbandwidth, "40M", 3)) bandwidth = 1; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("80M"), 3)) + else if (_rtl8812ae_eq_n_byte(pbandwidth, "80M", 3)) bandwidth = 2; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("160M"), 4)) + else if (_rtl8812ae_eq_n_byte(pbandwidth, "160M", 4)) bandwidth = 3; - if (_rtl8812ae_eq_n_byte(pband, (u8 *)("2.4G"), 4)) { + if (_rtl8812ae_eq_n_byte(pband, "2.4G", 4)) { ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_2_4G, channel); @@ -1718,7 +1718,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul regulation, bandwidth, rate_section, channel_index, rtlphy->txpwr_limit_2_4g[regulation][bandwidth] [rate_section][channel_index][RF90_PATH_A]); - } else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) { + } else if (_rtl8812ae_eq_n_byte(pband, "5G", 2)) { ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_5G, channel); @@ -1749,10 +1749,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul } 
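The rtl8821ae hunks above convert the TX-power-limit tables and their helpers from u8 * to const char *, removing every (u8 *)("FCC")-style cast and letting the string literals live in read-only data. A small self-contained sketch of the same table walk, using strncmp() in place of _rtl8812ae_eq_n_byte(); table contents are abbreviated and names illustrative:

/*
 * Rows of seven string literals, mirroring the driver's layout:
 * regulation, band, bandwidth, rate section, RF path, channel, limit.
 */
#include <stdio.h>
#include <string.h>

static const char *txpwr_lmt[] = {
	"FCC",  "2.4G", "20M", "CCK", "1T", "01", "36",
	"ETSI", "2.4G", "20M", "CCK", "1T", "01", "32",
};

static int parse_regulation(const char *s)
{
	if (!strncmp(s, "FCC", 3))
		return 0;
	if (!strncmp(s, "MKK", 3))
		return 1;
	if (!strncmp(s, "ETSI", 4))
		return 2;
	if (!strncmp(s, "WW13", 4))
		return 3;
	return -1;
}

int main(void)
{
	size_t i;

	/* Walk seven entries at a time, as the driver's loop does. */
	for (i = 0; i < sizeof(txpwr_lmt) / sizeof(txpwr_lmt[0]); i += 7)
		printf("regulation=%d channel=%s limit=%s\n",
		       parse_regulation(txpwr_lmt[i]), txpwr_lmt[i + 5],
		       txpwr_lmt[i + 6]);
	return 0;
}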
static void _rtl8812ae_phy_config_bb_txpwr_lmt(struct ieee80211_hw *hw, - u8 *regulation, u8 *band, - u8 *bandwidth, u8 *rate_section, - u8 *rf_path, u8 *channel, - u8 *power_limit) + const char *regulation, const char *band, + const char *bandwidth, const char *rate_section, + const char *rf_path, const char *channel, + const char *power_limit) { _rtl8812ae_phy_set_txpower_limit(hw, regulation, band, bandwidth, rate_section, rf_path, channel, @@ -1765,7 +1765,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u32 i = 0; u32 array_len; - u8 **array; + const char **array; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { array_len = RTL8812AE_TXPWR_LMT_ARRAY_LEN; @@ -1778,13 +1778,13 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n"); for (i = 0; i < array_len; i += 7) { - u8 *regulation = array[i]; - u8 *band = array[i+1]; - u8 *bandwidth = array[i+2]; - u8 *rate = array[i+3]; - u8 *rf_path = array[i+4]; - u8 *chnl = array[i+5]; - u8 *val = array[i+6]; + const char *regulation = array[i]; + const char *band = array[i+1]; + const char *bandwidth = array[i+2]; + const char *rate = array[i+3]; + const char *rf_path = array[i+4]; + const char *chnl = array[i+5]; + const char *val = array[i+6]; _rtl8812ae_phy_config_bb_txpwr_lmt(hw, regulation, band, bandwidth, rate, rf_path, @@ -2085,12 +2085,10 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, return __rtl8821ae_phy_config_with_headerfile(hw, radioa_array_table_a, radioa_arraylen_a, _rtl8821ae_config_rf_radio_a); - break; case RF90_PATH_B: return __rtl8821ae_phy_config_with_headerfile(hw, radioa_array_table_b, radioa_arraylen_b, _rtl8821ae_config_rf_radio_b); - break; case RF90_PATH_C: case RF90_PATH_D: pr_err("switch case %#x not processed\n", rfpath); @@ -2116,7 +2114,6 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, return __rtl8821ae_phy_config_with_headerfile(hw, radioa_array_table, radioa_arraylen, _rtl8821ae_config_rf_radio_a); - break; case RF90_PATH_B: case RF90_PATH_C: @@ -2449,8 +2446,9 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, else if (band == BAND_ON_5G) channel_temp = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_5G, channel); - else if (band == BAND_ON_BOTH) + else if (band == BAND_ON_BOTH) { ;/* BAND_ON_BOTH don't care temporarily */ + } if (band_temp == -1 || regulation == -1 || bandwidth_temp == -1 || rate_section == -1 || channel_temp == -1) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index 85093b3e5373..27c8a5d96520 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -2654,7 +2654,7 @@ u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY); * TXPWR_LMT.TXT ******************************************************************************/ -u8 *RTL8812AE_TXPWR_LMT[] = { +const char *RTL8812AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "36", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", @@ -3223,7 +3223,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = { u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT); -u8 *RTL8821AE_TXPWR_LMT[] = { +const char *RTL8821AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G", "20M", "CCK", "1T", "01", 
"32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 540159c25078..76c62b7c0fb2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h @@ -28,7 +28,7 @@ extern u32 RTL8821AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN; extern u32 RTL8812AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8812AE_TXPWR_LMT[]; +extern const char *RTL8812AE_TXPWR_LMT[]; extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8821AE_TXPWR_LMT[]; +extern const char *RTL8821AE_TXPWR_LMT[]; #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 06e073defad6..d62b87f010c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -731,7 +731,6 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) err_out: usb_kill_anchored_urbs(&rtlusb->rx_submitted); - _rtl_usb_cleanup_rx(hw); return err; } diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index aa08fd7d9fcd..24530cafcba7 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -8,6 +8,7 @@ #include "ps.h" #include "debug.h" #include "reg.h" +#include "phy.h" static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state, u8 rssi, u8 rssi_thresh) @@ -38,7 +39,7 @@ static void rtw_coex_limited_tx(struct rtw_dev *rtwdev, struct rtw_chip_info *chip = rtwdev->chip; struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - bool wifi_under_b_mode = false; + u8 num_of_active_port = 1; if (!chip->scbd_support) return; @@ -70,17 +71,13 @@ static void rtw_coex_limited_tx(struct rtw_dev *rtwdev, /* set queue life time to avoid can't reach tx retry limit * if tx is always broken by GNT_BT */ - rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf); + if (num_of_active_port <= 1) + rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf); rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x0808); /* auto rate fallback step within 8 retries */ - if (wifi_under_b_mode) { - rtw_write32(rtwdev, REG_DARFRC, 0x1000000); - rtw_write32(rtwdev, REG_DARFRCH, 0x1010101); - } else { - rtw_write32(rtwdev, REG_DARFRC, 0x1000000); - rtw_write32(rtwdev, REG_DARFRCH, 0x4030201); - } + rtw_write32(rtwdev, REG_DARFRC, 0x1000000); + rtw_write32(rtwdev, REG_DARFRCH, 0x4030201); } else { rtw_write8_clr(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE); rtw_write8_clr(rtwdev, REG_LIFETIME_EN, 0xf); @@ -101,39 +98,84 @@ static void rtw_coex_limited_wl(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_dm *coex_dm = &coex->dm; - struct rtw_coex_stat *coex_stat = &coex->stat; bool tx_limit = false; bool tx_agg_ctrl = false; - if (coex->under_5g || - coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { - /* no need to limit tx */ - } else { + if (!coex->under_5g && coex_dm->bt_status != COEX_BTSTATUS_NCON_IDLE) { tx_limit = true; - if (coex_stat->bt_hid_exist || coex_stat->bt_hfp_exist || - coex_stat->bt_hid_pair_num > 0) - tx_agg_ctrl = true; + tx_agg_ctrl = true; } rtw_coex_limited_tx(rtwdev, tx_limit, tx_agg_ctrl); } -static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev) +static bool rtw_coex_freerun_check(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + 
struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 bt_rssi; + u8 ant_distance = 10; + + if (coex_stat->bt_disabled) + return false; + + if (efuse->share_ant || ant_distance <= 5 || !coex_stat->wl_gl_busy) + return false; + + if (ant_distance >= 40 || coex_stat->bt_hid_pair_num >= 2) + return true; + + /* ant_distance = 5 ~ 40 */ + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]) && + COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) + return true; + + if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX) + bt_rssi = coex_dm->bt_rssi_state[0]; + else + bt_rssi = coex_dm->bt_rssi_state[1]; + + if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + COEX_RSSI_HIGH(bt_rssi) && + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] <= 5) + return true; + + return false; +} + +static void rtw_coex_wl_slot_extend(struct rtw_dev *rtwdev, bool enable) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; u8 para[6] = {0}; - if (coex->stop_dm) + para[0] = COEX_H2C69_WL_LEAKAP; + para[1] = PARA1_H2C69_DIS_5MS; + + if (enable) + para[1] = PARA1_H2C69_EN_5MS; + else + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + + coex_stat->wl_slot_extend = enable; + rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); +} + +static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + if (coex->manual_control || coex->stop_dm) return; - para[0] = COEX_H2C69_WL_LEAKAP; if (coex_stat->tdma_timer_base == 3 && coex_stat->wl_slot_extend) { - para[1] = PARA1_H2C69_DIS_5MS; /* disable 5ms extend */ - rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); - coex_stat->wl_slot_extend = false; - coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn off 5ms WL slot extend!!\n"); + rtw_coex_wl_slot_extend(rtwdev, false); return; } @@ -144,16 +186,20 @@ static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev) else coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], 5ms WL slot extend cnt = %d!!\n", + coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]); + if (coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] == 7) { - para[1] = 0x1; /* disable 5ms extend */ - rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); - coex_stat->wl_slot_extend = false; - coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn off 5ms WL slot extend!!\n"); + rtw_coex_wl_slot_extend(rtwdev, false); } } else if (!coex_stat->wl_slot_extend && coex_stat->wl_cck_lock) { - para[1] = 0x0; /* enable 5ms extend */ - rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); - coex_stat->wl_slot_extend = true; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], set h2c 0x69 opcode 12 to turn on 5ms WL slot extend!!\n"); + + rtw_coex_wl_slot_extend(rtwdev, true); } } @@ -161,11 +207,48 @@ static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_dm *coex_dm = &coex->dm; - /* TODO: wait for rx_rate_change_notify implement */ - coex_stat->wl_cck_lock = false; - coex_stat->wl_cck_lock_pre = false; - coex_stat->wl_cck_lock_ever = false; + bool is_cck_lock_rate = false; + + if (coex_dm->bt_status == COEX_BTSTATUS_INQ_PAGE || + coex_stat->bt_setup_link) { + coex_stat->wl_cck_lock = false; + coex_stat->wl_cck_lock_pre = false; + return; + } + + if (coex_stat->wl_rx_rate <= COEX_CCK_2 
|| + coex_stat->wl_rts_rx_rate <= COEX_CCK_2) + is_cck_lock_rate = true; + + if (coex_stat->wl_connected && coex_stat->wl_gl_busy && + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + (coex_dm->bt_status == COEX_BTSTATUS_ACL_BUSY || + coex_dm->bt_status == COEX_BTSTATUS_ACL_SCO_BUSY || + coex_dm->bt_status == COEX_BTSTATUS_SCO_BUSY)) { + if (is_cck_lock_rate) { + coex_stat->wl_cck_lock = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], cck locking...\n"); + + } else { + coex_stat->wl_cck_lock = false; + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], cck unlock...\n"); + } + } else { + coex_stat->wl_cck_lock = false; + } + + /* CCK lock identification */ + if (coex_stat->wl_cck_lock && !coex_stat->wl_cck_lock_pre) + ieee80211_queue_delayed_work(rtwdev->hw, &coex->wl_ccklock_work, + 3 * HZ); + + coex_stat->wl_cck_lock_pre = coex_stat->wl_cck_lock; } static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev) @@ -174,11 +257,12 @@ struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_dm_info *dm_info = &rtwdev->dm_info; u32 cnt_cck; + bool wl_cck_lock = false; /* wifi noisy environment identification */ cnt_cck = dm_info->cck_ok_cnt + dm_info->cck_err_cnt; - if (!coex_stat->wl_gl_busy) { + if (!coex_stat->wl_gl_busy && !wl_cck_lock) { if (cnt_cck > 250) { if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] < 5) coex_stat->cnt_wl[COEX_CNT_WL_NOISY2]++; @@ -211,6 +295,9 @@ static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev) coex_stat->wl_noisy_level = 1; else coex_stat->wl_noisy_level = 0; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], wl_noisy_level = %d\n", + coex_stat->wl_noisy_level); } } @@ -219,6 +306,8 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; u8 para[2] = {0}; + u8 times; + u16 tbtt_interval = coex_stat->wl_beacon_interval; if (coex_stat->tdma_timer_base == type) return; @@ -227,13 +316,33 @@ para[0] = COEX_H2C69_TDMA_SLOT; - if (type == 3) /* 4-slot */ + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], tbtt_interval = %d\n", + tbtt_interval); + + if (type == TDMA_TIMER_TYPE_4SLOT && tbtt_interval < 120) { para[1] = PARA1_H2C69_TDMA_4SLOT; /* 4-slot */ - else /* 2-slot */ + } else if (tbtt_interval < 80 && tbtt_interval > 0) { + times = 100 / tbtt_interval; + if (100 % tbtt_interval != 0) + times++; + + para[1] = FIELD_PREP(PARA1_H2C69_TBTT_TIMES, times); + } else if (tbtt_interval >= 180) { + times = tbtt_interval / 100; + if (tbtt_interval % 100 <= 80) + times--; + + para[1] = FIELD_PREP(PARA1_H2C69_TBTT_TIMES, times) | + FIELD_PREP(PARA1_H2C69_TBTT_DIV100, 1); + } else { para[1] = PARA1_H2C69_TDMA_2SLOT; + } rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): h2c_0x69 = 0x%x\n", + __func__, para[1]); + /* no 5ms_wl_slot_extend for 4-slot mode */ if (coex_stat->tdma_timer_base == 3) rtw_coex_wl_ccklock_action(rtwdev); @@ -307,6 +416,9 @@ static void rtw_coex_check_rfk(struct rtw_dev *rtwdev) if (coex_rfe->wlg_at_btg && chip->scbd_support && coex_stat->bt_iqk_state != 0xff) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], (Before Ant Setup) Delay by IQK\n"); + wait_cnt = COEX_RFK_TIMEOUT / COEX_MIN_DELAY; do { /* BT RFK */ @@ -318,6 +430,10 @@ static void rtw_coex_check_rfk(struct rtw_dev *rtwdev) if (!btk && !wlk) break; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], (Before Ant Setup) wlk = %d, btk = %d\n", 
+ wlk, btk); + mdelay(COEX_MIN_DELAY); } while (++cnt < wait_cnt); @@ -334,9 +450,16 @@ static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev) if (coex_stat->bt_disabled) return; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_fw_query_bt_info(rtwdev); } +static void rtw_coex_gnt_workaround(struct rtw_dev *rtwdev, bool force, u8 mode) +{ + rtw_coex_set_gnt_fix(rtwdev); +} + static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -352,21 +475,23 @@ static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev) } if (coex_stat->bt_disabled != bt_disabled) { - rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: BT state changed (%d) -> (%d)\n", + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT state changed (%d) -> (%d)\n", coex_stat->bt_disabled, bt_disabled); coex_stat->bt_disabled = bt_disabled; coex_stat->bt_ble_scan_type = 0; coex_dm->cur_bt_lna_lvl = 0; - } - if (!coex_stat->bt_disabled) { - coex_stat->bt_reenable = true; - ieee80211_queue_delayed_work(rtwdev->hw, - &coex->bt_reenable_work, 15 * HZ); - } else { - coex_stat->bt_mailbox_reply = false; - coex_stat->bt_reenable = false; + if (!coex_stat->bt_disabled) { + coex_stat->bt_reenable = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_reenable_work, + 15 * HZ); + } else { + coex_stat->bt_mailbox_reply = false; + coex_stat->bt_reenable = false; + } } } @@ -420,6 +545,12 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason) coex_dm->wl_rssi_state[i] = rssi_state; } + if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || + coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy) + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true); + else + rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); + switch (reason) { case COEX_RSN_5GSCANSTART: case COEX_RSN_5GSWITCHBAND: @@ -535,6 +666,23 @@ out: return ret; } +#define case_BTSTATUS(src) \ + case COEX_BTSTATUS_##src: return #src + +static const char *rtw_coex_get_bt_status_string(u8 bt_status) +{ + switch (bt_status) { + case_BTSTATUS(NCON_IDLE); + case_BTSTATUS(CON_IDLE); + case_BTSTATUS(INQ_PAGE); + case_BTSTATUS(ACL_BUSY); + case_BTSTATUS(SCO_BUSY); + case_BTSTATUS(ACL_SCO_BUSY); + default: + return "Unknown"; + } +} + static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; @@ -551,20 +699,11 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) rssi_state = coex_dm->bt_rssi_state[i]; rssi_step = chip->bt_rssi_step[i]; rssi = coex_stat->bt_rssi; - rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, - rssi, rssi_step); + rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, rssi, + rssi_step); coex_dm->bt_rssi_state[i] = rssi_state; } - for (i = 0; i < COEX_RSSI_STEP; i++) { - rssi_state = coex_dm->wl_rssi_state[i]; - rssi_step = chip->wl_rssi_step[i]; - rssi = rtwdev->dm_info.min_rssi; - rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state, - rssi, rssi_step); - coex_dm->wl_rssi_state[i] = rssi_state; - } - if (coex_stat->bt_ble_scan_en && coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE] % 3 == 0) { u8 scan_type; @@ -623,6 +762,7 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) coex_dm->bt_status = COEX_BTSTATUS_INQ_PAGE; } else if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) { coex_dm->bt_status = COEX_BTSTATUS_NCON_IDLE; + coex_stat->bt_multi_link_remain = false; } else if (coex_stat->bt_info_lb2 == COEX_INFO_CONNECTION) { coex_dm->bt_status = COEX_BTSTATUS_CON_IDLE; } else if 
((coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) || @@ -639,7 +779,8 @@ static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev) coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE]++; - rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: bt status(%d)\n", coex_dm->bt_status); + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(), %s!!!\n", __func__, + rtw_coex_get_bt_status_string(coex_dm->bt_status)); } static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) @@ -659,6 +800,8 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) { link = 0; + center_chan = 0; + bw = 0; } else if (center_chan <= 14) { link = 0x1; @@ -682,6 +825,9 @@ static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type) coex_dm->wl_ch_info[2] = bw; rtw_fw_wl_ch_info(rtwdev, link, center_chan, bw); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s: para[0:2] = 0x%x 0x%x 0x%x\n", __func__, link, + center_chan, bw); } static void rtw_coex_set_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl) @@ -714,6 +860,8 @@ static void rtw_coex_set_bt_rx_gain(struct rtw_dev *rtwdev, u8 bt_lna_lvl) } else { rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, false); } + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): bt_rx_LNA_level = %d\n", + __func__, bt_lna_lvl); } static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev, @@ -723,7 +871,7 @@ static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev, struct rtw_coex_stat *coex_stat = &coex->stat; u8 offset = 0; - if (coex->freerun && coex_stat->wl_noisy_level <= 1) + if (coex->freerun && coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] <= 5) offset = 3; rtw_coex_set_wl_tx_power(rtwdev, para.wl_pwr_dec_lvl); @@ -765,11 +913,13 @@ static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) const struct rtw_hw_reg *btg_reg = chip->btg_reg; if (wifi_control) { - rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); + rtw_write8_set(rtwdev, REG_SYS_SDIO_CTRL + 3, + BIT_LTE_MUX_CTRL_PATH >> 24); if (btg_reg) rtw_write8_set(rtwdev, btg_reg->addr, btg_reg->mask); } else { - rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH); + rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL + 3, + BIT_LTE_MUX_CTRL_PATH >> 24); if (btg_reg) rtw_write8_clr(rtwdev, btg_reg->addr, btg_reg->mask); } @@ -777,52 +927,134 @@ static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state) { - rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, state); - rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0c00, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0xc000, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0c00, state); } static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state) { - rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x3000, state); - rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0300, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x3000, state); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state); +} + +static void rtw_btc_wltoggle_table_a(struct rtw_dev *rtwdev, bool force, + u8 table_case) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 h2c_para[6] = {0}; + u32 table_wl = 0x5a5a5a5a; + + h2c_para[0] = COEX_H2C69_TOGGLE_TABLE_A; + /* no definition */ + h2c_para[1] = 0x1; + + if (efuse->share_ant) { + if (table_case < chip->table_sant_num) + table_wl = chip->table_sant[table_case].wl; + } else { + if 
(table_case < chip->table_nsant_num) + table_wl = chip->table_nsant[table_case].wl; + } + + /* tell WL FW WL slot toggle table-A*/ + h2c_para[2] = (u8)u32_get_bits(table_wl, GENMASK(7, 0)); + h2c_para[3] = (u8)u32_get_bits(table_wl, GENMASK(15, 8)); + h2c_para[4] = (u8)u32_get_bits(table_wl, GENMASK(23, 16)); + h2c_para[5] = (u8)u32_get_bits(table_wl, GENMASK(31, 24)); + + rtw_fw_bt_wifi_control(rtwdev, h2c_para[0], &h2c_para[1]); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): H2C = [%02x %02x %02x %02x %02x %02x]\n", + __func__, h2c_para[0], h2c_para[1], h2c_para[2], + h2c_para[3], h2c_para[4], h2c_para[5]); +} + +#define COEX_WL_SLOT_TOGLLE 0x5a5a5aaa +static void rtw_btc_wltoggle_table_b(struct rtw_dev *rtwdev, bool force, + u8 interval, u32 table) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + u8 cur_h2c_para[6] = {0}; + u8 i; + + cur_h2c_para[0] = COEX_H2C69_TOGGLE_TABLE_B; + cur_h2c_para[1] = interval; + cur_h2c_para[2] = (u8)u32_get_bits(table, GENMASK(7, 0)); + cur_h2c_para[3] = (u8)u32_get_bits(table, GENMASK(15, 8)); + cur_h2c_para[4] = (u8)u32_get_bits(table, GENMASK(23, 16)); + cur_h2c_para[5] = (u8)u32_get_bits(table, GENMASK(31, 24)); + + coex_stat->wl_toggle_interval = interval; + + for (i = 0; i <= 5; i++) + coex_stat->wl_toggle_para[i] = cur_h2c_para[i]; + + rtw_fw_bt_wifi_control(rtwdev, cur_h2c_para[0], &cur_h2c_para[1]); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): H2C = [%02x %02x %02x %02x %02x %02x]\n", + __func__, cur_h2c_para[0], cur_h2c_para[1], cur_h2c_para[2], + cur_h2c_para[3], cur_h2c_para[4], cur_h2c_para[5]); } -static void rtw_coex_set_table(struct rtw_dev *rtwdev, u32 table0, u32 table1) +static void rtw_coex_set_table(struct rtw_dev *rtwdev, bool force, u32 table0, + u32 table1) { -#define DEF_BRK_TABLE_VAL 0xf0ffffff +#define DEF_BRK_TABLE_VAL 0xf0ffffff + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + + /* If last tdma is wl slot toggle, force write table*/ + if (!force && coex_dm->reason != COEX_RSN_LPS) { + if (table0 == rtw_read32(rtwdev, REG_BT_COEX_TABLE0) && + table1 == rtw_read32(rtwdev, REG_BT_COEX_TABLE1)) + return; + } rtw_write32(rtwdev, REG_BT_COEX_TABLE0, table0); rtw_write32(rtwdev, REG_BT_COEX_TABLE1, table1); rtw_write32(rtwdev, REG_BT_COEX_BRK_TABLE, DEF_BRK_TABLE_VAL); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): 0x6c0 = %x, 0x6c4 = %x\n", __func__, table0, + table1); } -static void rtw_coex_table(struct rtw_dev *rtwdev, u8 type) +static void rtw_coex_table(struct rtw_dev *rtwdev, bool force, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_dm *coex_dm = &coex->dm; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_coex_stat *coex_stat = &coex->stat; coex_dm->cur_table = type; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Coex_Table - %d\n", type); + if (efuse->share_ant) { if (type < chip->table_sant_num) - rtw_coex_set_table(rtwdev, + rtw_coex_set_table(rtwdev, force, chip->table_sant[type].bt, chip->table_sant[type].wl); } else { type = type - 100; if (type < chip->table_nsant_num) - rtw_coex_set_table(rtwdev, + rtw_coex_set_table(rtwdev, force, chip->table_nsant[type].bt, chip->table_nsant[type].wl); } + if (coex_stat->wl_slot_toggle_change) + rtw_btc_wltoggle_table_a(rtwdev, true, type); } static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable) { struct rtw_coex *coex = &rtwdev->coex; - if (coex->stop_dm) + if (coex->manual_control || 
coex->stop_dm) return; rtw_fw_bt_ignore_wlan_action(rtwdev, enable); @@ -841,15 +1073,18 @@ static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type, case COEX_PS_WIFI_NATIVE: /* recover to original 32k low power setting */ coex_stat->wl_force_lps_ctrl = false; - + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): COEX_PS_WIFI_NATIVE\n", __func__); rtw_leave_lps(rtwdev); break; case COEX_PS_LPS_OFF: coex_stat->wl_force_lps_ctrl = true; if (lps_mode) - rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0); + rtw_fw_coex_tdma_type(rtwdev, 0, 0, 0, 0, 0); rtw_leave_lps(rtwdev); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): COEX_PS_LPS_OFF\n", __func__); break; default: break; @@ -862,10 +1097,14 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2, struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_dm *coex_dm = &coex->dm; struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_coex_stat *coex_stat = &coex->stat; u8 ps_type = COEX_PS_WIFI_NATIVE; bool ap_enable = false; if (ap_enable && (byte1 & BIT(4) && !(byte1 & BIT(5)))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): AP mode\n", + __func__); + byte1 &= ~BIT(4); byte1 |= BIT(5); @@ -875,12 +1114,20 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2, ps_type = COEX_PS_WIFI_NATIVE; rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): Force LPS (byte1 = 0x%x)\n", __func__, + byte1); + if (chip->pstdma_type == COEX_PSTDMA_FORCE_LPSOFF) ps_type = COEX_PS_LPS_OFF; else ps_type = COEX_PS_LPS_ON; rtw_coex_power_save_state(rtwdev, ps_type, 0x50, 0x4); } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): native power save (byte1 = 0x%x)\n", + __func__, byte1); + ps_type = COEX_PS_WIFI_NATIVE; rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0); } @@ -892,6 +1139,14 @@ static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2, coex_dm->ps_tdma_para[4] = byte5; rtw_fw_coex_tdma_type(rtwdev, byte1, byte2, byte3, byte4, byte5); + + if (byte1 & BIT(2)) { + coex_stat->wl_slot_toggle = true; + coex_stat->wl_slot_toggle_change = false; + } else { + coex_stat->wl_slot_toggle_change = coex_stat->wl_slot_toggle; + coex_stat->wl_slot_toggle = false; + } } static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) @@ -905,26 +1160,24 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) bool turn_on; bool wl_busy = false; - if (tcase & TDMA_4SLOT)/* 4-slot (50ms) mode */ - rtw_coex_tdma_timer_base(rtwdev, 3); + if (tcase & TDMA_4SLOT) /* 4-slot (50ms) mode */ + rtw_coex_tdma_timer_base(rtwdev, TDMA_TIMER_TYPE_4SLOT); else - rtw_coex_tdma_timer_base(rtwdev, 0); + rtw_coex_tdma_timer_base(rtwdev, TDMA_TIMER_TYPE_2SLOT); type = (u8)(tcase & 0xff); turn_on = (type == 0 || type == 100) ? false : true; - if (!force) { - if (turn_on == coex_dm->cur_ps_tdma_on && - type == coex_dm->cur_ps_tdma) { - return; - } - } - - /* enable TBTT interrupt */ - if (turn_on) - rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + if (!force && turn_on == coex_dm->cur_ps_tdma_on && + type == coex_dm->cur_ps_tdma) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Skip TDMA because no change TDMA(%s, %d)\n", + (coex_dm->cur_ps_tdma_on ? 
"on" : "off"), + coex_dm->cur_ps_tdma); + return; + } wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); if ((coex_stat->bt_a2dp_exist && @@ -934,6 +1187,10 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) else rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true); + /* update pre state */ + coex_dm->cur_ps_tdma_on = turn_on; + coex_dm->cur_ps_tdma = type; + if (efuse->share_ant) { if (type < chip->tdma_sant_num) rtw_coex_set_tdma(rtwdev, @@ -953,17 +1210,16 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) chip->tdma_nsant[n].para[4]); } - /* update pre state */ - coex_dm->cur_ps_tdma_on = turn_on; - coex_dm->cur_ps_tdma = type; - rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: coex tdma type (%d)\n", type); + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], coex tdma type(%s, %d)\n", + turn_on ? "on" : "off", type); } static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; struct rtw_coex_dm *coex_dm = &coex->dm; u8 ctrl_type = COEX_SWITCH_CTRL_MAX; u8 pos_type = COEX_SWITCH_TO_MAX; @@ -976,8 +1232,14 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) /* avoid switch coex_ctrl_owner during BT IQK */ rtw_coex_check_rfk(rtwdev); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], coex_stat->bt_disabled = 0x%x\n", + coex_stat->bt_disabled); + switch (phase) { case COEX_SET_ANT_POWERON: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_COEX_POWERON\n", __func__); /* set path control owner to BT at power-on */ if (coex_stat->bt_disabled) rtw_coex_coex_ctrl_owner(rtwdev, true); @@ -988,6 +1250,8 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_BT; break; case COEX_SET_ANT_INIT: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_COEX_INIT\n", __func__); if (coex_stat->bt_disabled) { /* set GNT_BT to SW low */ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); @@ -1009,10 +1273,12 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_BT; break; case COEX_SET_ANT_WONLY: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_WLANONLY_INIT\n", __func__); /* set GNT_BT to SW Low */ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW); - /* Set GNT_WL to SW high */ + /* set GNT_WL to SW high */ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); /* set path control owner to wl at initial step */ @@ -1022,6 +1288,8 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_WLG; break; case COEX_SET_ANT_WOFF: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_WLAN_OFF\n", __func__); /* set path control owner to BT */ rtw_coex_coex_ctrl_owner(rtwdev, false); @@ -1029,6 +1297,8 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_NOCARE; break; case COEX_SET_ANT_2G: + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_RUNTIME\n", __func__); /* set GNT_BT to PTA */ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); @@ -1042,8 +1312,11 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_NOCARE; break; case COEX_SET_ANT_5G: - /* set GNT_BT to PTA */ - rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_5G_RUNTIME\n", __func__); + + /* 
set GNT_BT to HW PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); /* set GNT_WL to SW high */ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); @@ -1055,8 +1328,11 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_WLA; break; case COEX_SET_ANT_2G_FREERUN: - /* set GNT_BT to SW high */ - rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_FREERUN\n", __func__); + + /* set GNT_BT to HW PTA */ + rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); /* Set GNT_WL to SW high */ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH); @@ -1068,10 +1344,12 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) pos_type = COEX_SWITCH_TO_WLG_BT; break; case COEX_SET_ANT_2G_WLBT: - /* set GNT_BT to SW high */ + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s() - PHASE_2G_WLBT\n", __func__); + /* set GNT_BT to HW PTA */ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA); - /* Set GNT_WL to SW high */ + /* Set GNT_WL to HW PTA */ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA); /* set path control owner to wl at runtime step */ @@ -1085,10 +1363,58 @@ static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase) return; } - if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX) + if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX && + coex_rfe->ant_switch_exist) rtw_coex_set_ant_switch(rtwdev, ctrl_type, pos_type); } +#define case_ALGO(src) \ + case COEX_ALGO_##src: return #src + +static const char *rtw_coex_get_algo_string(u8 algo) +{ + switch (algo) { + case_ALGO(NOPROFILE); + case_ALGO(HFP); + case_ALGO(HID); + case_ALGO(A2DP); + case_ALGO(PAN); + case_ALGO(A2DP_HID); + case_ALGO(A2DP_PAN); + case_ALGO(PAN_HID); + case_ALGO(A2DP_PAN_HID); + default: + return "Unknown"; + } +} + +#define case_BT_PROFILE(src) \ + case BPM_##src: return #src + +static const char *rtw_coex_get_bt_profile_string(u8 bt_profile) +{ + switch (bt_profile) { + case_BT_PROFILE(NOPROFILE); + case_BT_PROFILE(HFP); + case_BT_PROFILE(HID); + case_BT_PROFILE(A2DP); + case_BT_PROFILE(PAN); + case_BT_PROFILE(HID_HFP); + case_BT_PROFILE(A2DP_HFP); + case_BT_PROFILE(A2DP_HID); + case_BT_PROFILE(A2DP_HID_HFP); + case_BT_PROFILE(PAN_HFP); + case_BT_PROFILE(PAN_HID); + case_BT_PROFILE(PAN_HID_HFP); + case_BT_PROFILE(PAN_A2DP); + case_BT_PROFILE(PAN_A2DP_HFP); + case_BT_PROFILE(PAN_A2DP_HID); + case_BT_PROFILE(PAN_A2DP_HID_HFP); + default: + return "Unknown"; + } +} + static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; @@ -1149,6 +1475,10 @@ static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev) break; } + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Profile = %s => Algorithm = %s\n", + rtw_coex_get_bt_profile_string(profile_map), + rtw_coex_get_algo_string(algorithm)); return algorithm; } @@ -1158,6 +1488,9 @@ static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 2; @@ -1168,8 +1501,7 @@ static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev) tdma_case = 100; } - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ 
-1181,13 +1513,16 @@ static void rtw_coex_action_freerun(struct rtw_dev *rtwdev) struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 level = 0; + bool bt_afh_loss = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); if (efuse->share_ant) return; coex->freerun = true; - if (coex_stat->wl_connected) + if (bt_afh_loss) rtw_coex_update_wl_ch_info(rtwdev, COEX_MEDIA_CONNECT); rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); @@ -1211,41 +1546,49 @@ static void rtw_coex_action_freerun(struct rtw_dev *rtwdev) else rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[level]); - rtw_coex_table(rtwdev, 100); + rtw_coex_table(rtwdev, false, 100); rtw_coex_tdma(rtwdev, false, 100); } -static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev) +static void rtw_coex_action_rf4ce(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ - table_case = 2; - tdma_case = 0; + table_case = 9; + tdma_case = 16; } else { /* Non-Shared-Ant */ table_case = 100; tdma_case = 100; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } -static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev) +static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev) { struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ - table_case = 1; + table_case = 2; tdma_case = 0; } else { /* Non-Shared-Ant */ @@ -1253,9 +1596,45 @@ static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev) tdma_case = 100; } + rtw_coex_table(rtwdev, false, table_case); + rtw_coex_tdma(rtwdev, false, tdma_case); +} + +static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 table_case, tdma_case; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + + if (efuse->share_ant) { /* Shared-Ant */ + if (coex_stat->wl_gl_busy) { + table_case = 26; + if (coex_stat->bt_hid_exist && + coex_stat->bt_profile_num == 1) { + tdma_case = 20; + } else { + tdma_case = 20; + } + } else { + table_case = 1; + tdma_case = 0; + } + } else { /* Non-Shared-Ant */ + if (coex_stat->wl_gl_busy) + table_case = 115; + else + table_case = 100; + tdma_case = 100; + } + + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1269,10 +1648,14 @@ static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) struct rtw_coex_rfe *coex_rfe = &coex->rfe; u8 table_case = 0xff, tdma_case = 0xff; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + 
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (coex_rfe->ant_switch_with_bt && coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { if (efuse->share_ant && - COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) { + COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && + coex_stat->wl_gl_busy) { table_case = 0; tdma_case = 0; } else if (!efuse->share_ant) { @@ -1283,9 +1666,7 @@ static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) if (table_case != 0xff && tdma_case != 0xff) { rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN); - rtw_coex_table(rtwdev, table_case); - rtw_coex_tdma(rtwdev, false, tdma_case); - return; + goto exit; } rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); @@ -1296,8 +1677,12 @@ static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) table_case = 10; tdma_case = 3; } else if (coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { - table_case = 6; - tdma_case = 7; + table_case = 11; + + if (coex_stat->lo_pri_rx + coex_stat->lo_pri_tx > 250) + tdma_case = 17; + else + tdma_case = 7; } else { table_case = 12; tdma_case = 7; @@ -1308,7 +1693,7 @@ static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) table_case = 112; tdma_case = 104; } else if ((coex_stat->bt_ble_scan_type & 0x2) && - coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { + coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) { table_case = 114; tdma_case = 103; } else { @@ -1317,8 +1702,8 @@ static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev) } } - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); +exit: + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1332,6 +1717,10 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) u8 table_case, tdma_case; u32 slot_type = 0; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 || coex_stat->wl_hi_pri_task2) wl_hi_pri = true; @@ -1339,7 +1728,10 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) if (efuse->share_ant) { /* Shared-Ant */ if (wl_hi_pri) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi hi-pri task\n"); table_case = 15; + if (coex_stat->bt_profile_num > 0) tdma_case = 10; else if (coex_stat->wl_hi_pri_task1) @@ -1349,6 +1741,8 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) else tdma_case = 9; } else if (coex_stat->wl_gl_busy) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi busy\n"); if (coex_stat->bt_profile_num == 0) { table_case = 12; tdma_case = 18; @@ -1363,43 +1757,53 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) tdma_case = 26; } } else if (coex_stat->wl_connected) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi connected\n"); table_case = 9; tdma_case = 27; } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi not-connected\n"); table_case = 1; tdma_case = 0; } } else { /* Non_Shared-Ant */ if (wl_hi_pri) { - table_case = 113; - if (coex_stat->bt_a2dp_exist && - !coex_stat->bt_pan_exist) - tdma_case = 111; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi hi-pri task\n"); + table_case = 114; + + if (coex_stat->bt_profile_num > 0) + tdma_case = 110; else if (coex_stat->wl_hi_pri_task1) tdma_case = 106; else if (!coex_stat->bt_page) tdma_case = 108; else tdma_case = 109; - } else if (coex_stat->wl_gl_busy) { + } else 
if (coex_stat->wl_gl_busy) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi busy\n"); table_case = 114; tdma_case = 121; } else if (coex_stat->wl_connected) { - table_case = 100; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi connected\n"); + table_case = 101; tdma_case = 100; } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt inq/page + wifi not-connected\n"); table_case = 101; tdma_case = 100; } } - rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: wifi hi(%d), bt page(%d)\n", + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], wifi hi(%d), bt page(%d)\n", wl_hi_pri, coex_stat->bt_page); - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } @@ -1411,6 +1815,10 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_multi_link) { @@ -1431,9 +1839,7 @@ static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev) } } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1444,62 +1850,80 @@ static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev) struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; - u32 wl_bw; + u32 slot_type = 0; + bool bt_multi_link_remain = false, is_toggle_table = false; - wl_bw = rtwdev->hal.current_band_width; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_ble_exist) { /* RCU */ - if (!coex_stat->wl_gl_busy) - table_case = 14; - else - table_case = 15; - - if (coex_stat->bt_a2dp_active || wl_bw == 0) - tdma_case = 18; - else if (coex_stat->wl_gl_busy) - tdma_case = 8; - else - tdma_case = 4; + if (coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] > 5) { + table_case = 26; + tdma_case = 2; + } else { + table_case = 27; + tdma_case = 9; + } } else { - if (coex_stat->bt_a2dp_active || wl_bw == 0) { - table_case = 8; - tdma_case = 4; + /* Legacy HID */ + if (coex_stat->bt_profile_num == 1 && + (coex_stat->bt_multi_link || + (coex_stat->lo_pri_rx + + coex_stat->lo_pri_tx > 360) || + coex_stat->bt_slave || + bt_multi_link_remain)) { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 20; + } else if (coex_stat->bt_a2dp_active) { + table_case = 9; + tdma_case = 18; + } else if (coex_stat->bt_418_hid_exist && + coex_stat->wl_gl_busy) { + is_toggle_table = true; + slot_type = TDMA_4SLOT; + table_case = 9; + tdma_case = 24; + } else if (coex_stat->bt_ble_hid_exist && + coex_stat->wl_gl_busy) { + table_case = 32; + tdma_case = 9; } else { - /* for 4/18 HID */ - if (coex_stat->bt_418_hid_exist && - coex_stat->wl_gl_busy) - table_case = 12; - else - table_case = 10; - tdma_case = 4; + table_case = 9; + tdma_case = 9; } } } else { /* Non-Shared-Ant */ - if (coex_stat->bt_a2dp_active) { - table_case = 113; - tdma_case = 118; - } else if 
(coex_stat->bt_ble_exist) { + if (coex_stat->bt_ble_exist) { /* BLE */ + if (coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] > 5) { + table_case = 121; + tdma_case = 102; + } else { + table_case = 122; + tdma_case = 109; + } + } else if (coex_stat->bt_a2dp_active) { table_case = 113; - - if (coex_stat->wl_gl_busy) - tdma_case = 106; - else - tdma_case = 104; + tdma_case = 118; } else { table_case = 113; tdma_case = 104; } } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); - rtw_coex_tdma(rtwdev, false, tdma_case); + rtw_coex_table(rtwdev, false, table_case); + if (is_toggle_table) { + rtw_btc_wltoggle_table_a(rtwdev, true, table_case); + rtw_btc_wltoggle_table_b(rtwdev, false, 1, COEX_WL_SLOT_TOGLLE); + } + + rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) @@ -1512,19 +1936,24 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) u8 table_case, tdma_case; u32 slot_type = 0; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + slot_type = TDMA_4SLOT; + if (efuse->share_ant) { /* Shared-Ant */ - slot_type = TDMA_4SLOT; - if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) - table_case = 10; + table_case = 12; else table_case = 9; - if (coex_stat->wl_gl_busy) - tdma_case = 13; - else + if (coex_stat->wl_connecting || !coex_stat->wl_gl_busy) tdma_case = 14; + else + tdma_case = 13; } else { /* Non-Shared-Ant */ table_case = 112; @@ -1535,9 +1964,7 @@ static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev) tdma_case = 113; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } @@ -1550,6 +1977,11 @@ static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev) u8 table_case, tdma_case; bool ap_enable = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ if (ap_enable) { table_case = 2; @@ -1571,9 +2003,7 @@ static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev) } } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1585,6 +2015,10 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0) @@ -1595,7 +2029,7 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev) if (coex_stat->wl_gl_busy) tdma_case = 17; else - tdma_case = 19; + tdma_case = 20; } else { /* Non-Shared-Ant */ table_case = 112; @@ -1606,9 +2040,7 @@ static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev) tdma_case = 119; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - 
rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1619,22 +2051,34 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev) struct rtw_coex_dm *coex_dm = &coex->dm; struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; - u8 table_case, tdma_case; + u8 table_case, tdma_case, interval = 0; u32 slot_type = 0; + bool is_toggle_table = false; + + slot_type = TDMA_4SLOT; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ - slot_type = TDMA_4SLOT; - - if (coex_stat->bt_ble_exist) - table_case = 26; - else + if (coex_stat->bt_ble_exist) { + table_case = 26; /* for RCU */ + } else if (coex_stat->bt_418_hid_exist) { table_case = 9; - - if (coex_stat->wl_gl_busy) { - tdma_case = 13; + interval = 1; } else { + table_case = 9; + } + + if (coex_stat->wl_connecting || !coex_stat->wl_gl_busy) { tdma_case = 14; + } else if (coex_stat->bt_418_hid_exist) { + is_toggle_table = true; + tdma_case = 23; + } else { + tdma_case = 13; } } else { /* Non-Shared-Ant */ @@ -1649,9 +2093,11 @@ static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev) tdma_case = 113; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); + if (is_toggle_table) { + rtw_btc_wltoggle_table_a(rtwdev, true, table_case); + rtw_btc_wltoggle_table_b(rtwdev, false, interval, COEX_WL_SLOT_TOGLLE); + } rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } @@ -1662,19 +2108,37 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) struct rtw_efuse *efuse = &rtwdev->efuse; struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + bool wl_cpt_test = false, bt_cpt_test = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); if (efuse->share_ant) { /* Shared-Ant */ - if (coex_stat->wl_gl_busy && - coex_stat->wl_noisy_level == 0) - table_case = 14; - else - table_case = 10; + if (wl_cpt_test) { + if (coex_stat->wl_gl_busy) { + table_case = 20; + tdma_case = 17; + } else { + table_case = 10; + tdma_case = 15; + } + } else if (bt_cpt_test) { + table_case = 26; + tdma_case = 26; + } else { + if (coex_stat->wl_gl_busy && + coex_stat->wl_noisy_level == 0) + table_case = 14; + else + table_case = 10; - if (coex_stat->wl_gl_busy) - tdma_case = 15; - else - tdma_case = 20; + if (coex_stat->wl_gl_busy) + tdma_case = 15; + else + tdma_case = 20; + } } else { /* Non-Shared-Ant */ table_case = 112; @@ -1685,9 +2149,12 @@ static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev) tdma_case = 120; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + if (wl_cpt_test) + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[1]); + else + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1699,6 +2166,11 @@ static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + 
rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 9; @@ -1717,9 +2189,7 @@ static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev) tdma_case = 119; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1731,6 +2201,10 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 10; @@ -1749,9 +2223,7 @@ static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev) tdma_case = 120; } - rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1761,6 +2233,11 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); if (efuse->share_ant) { @@ -1773,9 +2250,7 @@ static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev) tdma_case = 100; } - rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1785,6 +2260,10 @@ static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 2; @@ -1795,9 +2274,7 @@ static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev) tdma_case = 100; } - rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1811,6 +2288,11 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev) if (coex->under_5g) return; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 28; @@ -1821,9 +2303,7 @@ static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev) tdma_case = 100; } - rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1836,8 +2316,11 @@ static void 
rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) u8 table_case, tdma_case; u32 slot_type = 0; - if (efuse->share_ant) { - /* Shared-Ant */ + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + + if (efuse->share_ant) { /* Shared-Ant */ if (coex_stat->bt_a2dp_exist) { slot_type = TDMA_4SLOT; table_case = 9; @@ -1846,9 +2329,9 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) table_case = 9; tdma_case = 7; } - } else { - /* Non-Shared-Ant */ + } else { /* Non-Shared-Ant */ if (coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; table_case = 112; tdma_case = 111; } else { @@ -1857,9 +2340,7 @@ static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev) } } - rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case | slot_type); } @@ -1869,6 +2350,10 @@ static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev) struct rtw_chip_info *chip = rtwdev->chip; u8 table_case, tdma_case; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); + rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G); + rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); + if (efuse->share_ant) { /* Shared-Ant */ table_case = 1; @@ -1879,9 +2364,7 @@ static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev) tdma_case = 100; } - rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); - rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]); - rtw_coex_table(rtwdev, table_case); + rtw_coex_table(rtwdev, false, table_case); rtw_coex_tdma(rtwdev, false, tdma_case); } @@ -1889,17 +2372,9 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - struct rtw_coex_dm *coex_dm = &coex->dm; - struct rtw_efuse *efuse = &rtwdev->efuse; u8 algorithm; - /* Non-Shared-Ant */ - if (!efuse->share_ant && coex_stat->wl_gl_busy && - COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) && - COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) { - rtw_coex_action_freerun(rtwdev); - return; - } + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); algorithm = rtw_coex_algorithm(rtwdev); @@ -1908,10 +2383,15 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev) rtw_coex_action_bt_hfp(rtwdev); break; case COEX_ALGO_HID: - rtw_coex_action_bt_hid(rtwdev); + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else + rtw_coex_action_bt_hid(rtwdev); break; case COEX_ALGO_A2DP: - if (coex_stat->bt_a2dp_sink) + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else if (coex_stat->bt_a2dp_sink) rtw_coex_action_bt_a2dpsink(rtwdev); else rtw_coex_action_bt_a2dp(rtwdev); @@ -1920,7 +2400,10 @@ static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev) rtw_coex_action_bt_pan(rtwdev); break; case COEX_ALGO_A2DP_HID: - rtw_coex_action_bt_a2dp_hid(rtwdev); + if (rtw_coex_freerun_check(rtwdev)) + rtw_coex_action_freerun(rtwdev); + else + rtw_coex_action_bt_a2dp_hid(rtwdev); break; case COEX_ALGO_A2DP_PAN: rtw_coex_action_bt_a2dp_pan(rtwdev); @@ -1943,6 +2426,7 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_dm *coex_dm = &coex->dm; struct rtw_coex_stat *coex_stat = &coex->stat; + bool rf4ce_en = 
false; lockdep_assert_held(&rtwdev->mutex); @@ -1951,20 +2435,38 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) coex_dm->reason = reason; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): reason = %d\n", __func__, + reason); + /* update wifi_link_info_ext variable */ rtw_coex_update_wl_link_info(rtwdev, reason); rtw_coex_monitor_bt_enable(rtwdev); - if (coex->stop_dm) + if (coex->manual_control) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for Manual CTRL!!\n"); return; + } - if (coex_stat->wl_under_ips) + if (coex->stop_dm) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for Stop Coex DM!!\n"); return; + } + + if (coex_stat->wl_under_ips) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for wifi is under IPS!!\n"); + return; + } if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && - !coex_stat->bt_setup_link) + !coex_stat->bt_setup_link) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], return for coex_freeze!!\n"); return; + } coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; coex->freerun = false; @@ -1976,10 +2478,16 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) goto exit; } + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], WiFi is single-port 2G!!\n"); coex_stat->wl_coex_mode = COEX_WLINK_2G1PORT; - rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); + if (coex_stat->bt_disabled) { - rtw_coex_action_wl_only(rtwdev); + if (coex_stat->wl_connected && rf4ce_en) + rtw_coex_action_rf4ce(rtwdev); + else if (!coex_stat->wl_connected) + rtw_coex_action_wl_not_connected(rtwdev); + else + rtw_coex_action_wl_only(rtwdev); goto exit; } @@ -2010,18 +2518,21 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) goto exit; } - if (coex_stat->wl_linkscan_proc) { + if (coex_stat->wl_linkscan_proc && !coex->freerun) { rtw_coex_action_wl_linkscan(rtwdev); goto exit; } - if (coex_stat->wl_connected) + if (coex_stat->wl_connected) { rtw_coex_action_wl_connected(rtwdev); - else + goto exit; + } else { rtw_coex_action_wl_not_connected(rtwdev); + goto exit; + } exit: - rtw_coex_set_gnt_fix(rtwdev); + rtw_coex_gnt_workaround(rtwdev, false, coex_stat->wl_coex_mode); rtw_coex_limited_wl(rtwdev); } @@ -2048,14 +2559,26 @@ static void rtw_coex_init_coex_var(struct rtw_dev *rtwdev) coex_dm->wl_rssi_state[i] = COEX_RSSI_STATE_LOW; coex_stat->wl_coex_mode = COEX_WLINK_MAX; + coex_stat->wl_rx_rate = DESC_RATE5_5M; + coex_stat->wl_rts_rx_rate = DESC_RATE5_5M; } static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) { struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); rtw_coex_init_coex_var(rtwdev); + + coex_stat->kt_ver = u8_get_bits(rtw_read8(rtwdev, 0xf1), GENMASK(7, 4)); + rtw_coex_monitor_bt_enable(rtwdev); + rtw_coex_wl_slot_extend(rtwdev, coex_stat->wl_slot_extend); + + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + rtw_coex_set_rfe_type(rtwdev); rtw_coex_set_init(rtwdev); @@ -2073,21 +2596,24 @@ static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false); coex->stop_dm = true; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): RF Off\n", + __func__); } else if (wifi_only) { rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WONLY); - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN, + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); coex->stop_dm = true; } 
else { rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_INIT); - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN, + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); coex->stop_dm = false; coex->freeze = true; } /* PTA parameter */ - rtw_coex_table(rtwdev, 0); + rtw_coex_table(rtwdev, true, 1); rtw_coex_tdma(rtwdev, true, 0); rtw_coex_query_bt_info(rtwdev); } @@ -2095,12 +2621,16 @@ static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; + u8 table_case = 1; + + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); coex->stop_dm = true; coex->wl_rf_off = false; /* enable BB, we can write 0x948 */ - rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT(0) | BIT(1)); + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, + BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB); rtw_coex_monitor_bt_enable(rtwdev); rtw_coex_set_rfe_type(rtwdev); @@ -2108,8 +2638,10 @@ void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) /* set antenna path to BT */ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_POWERON); + rtw_coex_table(rtwdev, true, table_case); /* red x issue */ rtw_write8(rtwdev, 0xff1a, 0x0); + rtw_coex_set_gnt_debug(rtwdev); } void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) @@ -2122,10 +2654,12 @@ void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type) struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - if (coex->stop_dm) + if (coex->manual_control || coex->stop_dm) return; if (type == COEX_IPS_ENTER) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], IPS ENTER notify\n"); + coex_stat->wl_under_ips = true; /* for lps off */ @@ -2134,11 +2668,11 @@ void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type) rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF); rtw_coex_action_coex_all_off(rtwdev); } else if (type == COEX_IPS_LEAVE) { - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], IPS LEAVE notify\n"); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); /* run init hw config (exclude wifi only) */ __rtw_coex_init_hw_config(rtwdev, false); - /* sw all off */ coex_stat->wl_under_ips = false; } @@ -2149,10 +2683,12 @@ void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type) struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - if (coex->stop_dm) + if (coex->manual_control || coex->stop_dm) return; if (type == COEX_LPS_ENABLE) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], LPS ENABLE notify\n"); + coex_stat->wl_under_lps = true; if (coex_stat->wl_force_lps_ctrl) { @@ -2161,10 +2697,13 @@ void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type) } else { /* for native ps */ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_WLBUSY, false); rtw_coex_run_coex(rtwdev, COEX_RSN_LPS); } } else if (type == COEX_LPS_DISABLE) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], LPS DISABLE notify\n"); + coex_stat->wl_under_lps = false; /* for lps off */ @@ -2172,6 +2711,8 @@ void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type) if (!coex_stat->wl_force_lps_ctrl) rtw_coex_query_bt_info(rtwdev); + + rtw_coex_run_coex(rtwdev, COEX_RSN_LPS); } } @@ -2180,25 +2721,34 @@ void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type) struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - if (coex->stop_dm) + if (coex->manual_control || 
coex->stop_dm) return; coex->freeze = false; - - if (type != COEX_SCAN_FINISH) - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN | - COEX_SCBD_ONOFF, true); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); if (type == COEX_SCAN_START_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN START notify (5G)\n"); + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); rtw_coex_run_coex(rtwdev, COEX_RSN_5GSCANSTART); } else if ((type == COEX_SCAN_START_2G) || (type == COEX_SCAN_START)) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN START notify (2G)\n"); + coex_stat->wl_hi_pri_task2 = true; /* Force antenna setup for no scan result issue */ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); rtw_coex_run_coex(rtwdev, COEX_RSN_2GSCANSTART); } else { + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP] = 30; /* To do */ + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], SCAN FINISH notify (Scan-AP = %d)\n", + coex_stat->cnt_wl[COEX_CNT_WL_SCANAP]); + coex_stat->wl_hi_pri_task2 = false; rtw_coex_run_coex(rtwdev, COEX_RSN_SCANFINISH); } @@ -2208,9 +2758,20 @@ void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; - if (coex->stop_dm) + if (coex->manual_control || coex->stop_dm) return; + if (type == COEX_SWITCH_TO_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): TO_5G\n", + __func__); + } else if (type == COEX_SWITCH_TO_24G_NOFORSCAN) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], %s(): TO_24G_NOFORSCAN\n", __func__); + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): TO_2G\n", + __func__); + } + if (type == COEX_SWITCH_TO_5G) rtw_coex_run_coex(rtwdev, COEX_RSN_5GSWITCHBAND); else if (type == COEX_SWITCH_TO_24G_NOFORSCAN) @@ -2224,22 +2785,33 @@ void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - if (coex->stop_dm) + if (coex->manual_control || coex->stop_dm) return; - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN | - COEX_SCBD_ONOFF, true); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true); if (type == COEX_ASSOCIATE_5G_START) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G start\n", + __func__); + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONSTART); } else if (type == COEX_ASSOCIATE_5G_FINISH) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G finish\n", + __func__); + rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONFINISH); } else if (type == COEX_ASSOCIATE_START) { coex_stat->wl_hi_pri_task1 = true; + coex_stat->wl_connecting = true; coex_stat->cnt_wl[COEX_CNT_WL_CONNPKT] = 2; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->wl_connecting_work, 2 * HZ); + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G start\n", + __func__); /* Force antenna setup for no scan result issue */ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G); @@ -2254,7 +2826,10 @@ } else { coex_stat->wl_hi_pri_task1 = false; coex->freeze = false; + coex_stat->wl_connecting = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G finish\n", + __func__); rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONFINISH); } } @@ -2263,17 +2838,22 @@ void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; - u8 
para[6] = {0}; - if (coex->stop_dm) + if (coex->manual_control || coex->stop_dm) return; if (type == COEX_MEDIA_CONNECT_5G) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 5G\n", __func__); + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G); rtw_coex_run_coex(rtwdev, COEX_RSN_5GMEDIA); } else if (type == COEX_MEDIA_CONNECT) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): 2G\n", __func__); + + coex_stat->wl_connecting = false; + rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true); /* Force antenna setup for no scan result issue */ @@ -2281,18 +2861,11 @@ void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type) /* Set CCK Rx high Pri */ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 1); - - /* always enable 5ms extend if connect */ - para[0] = COEX_H2C69_WL_LEAKAP; - para[1] = PARA1_H2C69_EN_5MS; /* enable 5ms extend */ - rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]); - coex_stat->wl_slot_extend = true; rtw_coex_run_coex(rtwdev, COEX_RSN_2GMEDIA); } else { - rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false); - + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s(): disconnect!!\n", + __func__); rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 0); - rtw_coex_run_coex(rtwdev, COEX_RSN_MEDIADISCON); } @@ -2304,25 +2877,35 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_chip_info *chip = rtwdev->chip; - unsigned long bt_relink_time; + struct rtw_coex_dm *coex_dm = &coex->dm; + u32 bt_relink_time; u8 i, rsp_source = 0, type; bool inq_page = false; rsp_source = buf[0] & 0xf; if (rsp_source >= COEX_BTINFO_SRC_MAX) - rsp_source = COEX_BTINFO_SRC_WL_FW; + return; + coex_stat->cnt_bt_info_c2h[rsp_source]++; if (rsp_source == COEX_BTINFO_SRC_BT_IQK) { coex_stat->bt_iqk_state = buf[1]; - if (coex_stat->bt_iqk_state == 1) + if (coex_stat->bt_iqk_state == 0) coex_stat->cnt_bt[COEX_CNT_BT_IQK]++; else if (coex_stat->bt_iqk_state == 2) coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]++; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT IQK by bt_info, data0 = 0x%02x\n", + buf[1]); + return; } if (rsp_source == COEX_BTINFO_SRC_BT_SCBD) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Scoreboard change notify by WL FW c2h, 0xaa = 0x%02x, 0xab = 0x%02x\n", + buf[1], buf[2]); + rtw_coex_monitor_bt_enable(rtwdev); if (coex_stat->bt_disabled != coex_stat->bt_disabled_pre) { coex_stat->bt_disabled_pre = coex_stat->bt_disabled; @@ -2331,6 +2914,24 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) return; } + if (rsp_source == COEX_BTINFO_SRC_H2C60) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], H2C 0x60 content replied by WL FW: H2C_0x60 = [%02x %02x %02x %02x %02x]\n", + buf[1], buf[2], buf[3], buf[4], buf[5]); + + for (i = 1; i <= COEX_WL_TDMA_PARA_LENGTH; i++) + coex_dm->fw_tdma_para[i - 1] = buf[i]; + return; + } + + if (rsp_source == COEX_BTINFO_SRC_WL_FW) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], bt_info reply by WL FW\n"); + + rtw_coex_update_bt_link_info(rtwdev); + return; + } + if (rsp_source == COEX_BTINFO_SRC_BT_RSP || rsp_source == COEX_BTINFO_SRC_BT_ACT) { if (coex_stat->bt_disabled) { @@ -2339,30 +2940,36 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) ieee80211_queue_delayed_work(rtwdev->hw, &coex->bt_reenable_work, 15 * HZ); + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT enable detected by bt_info\n"); } } - for (i = 0; i < length; i++) { - if (i < 
COEX_BTINFO_LENGTH_MAX) - coex_stat->bt_info_c2h[rsp_source][i] = buf[i]; - else - break; - } + if (length != COEX_BTINFO_LENGTH) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Bt_info length = %d invalid!!\n", length); - if (rsp_source == COEX_BTINFO_SRC_WL_FW) { - rtw_coex_update_bt_link_info(rtwdev); - rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO); return; } + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Bt_info[%d], len=%d, data=[%02x %02x %02x %02x %02x %02x]\n", + buf[0], length, buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); + + for (i = 0; i < COEX_BTINFO_LENGTH; i++) + coex_stat->bt_info_c2h[rsp_source][i] = buf[i]; + /* get the same info from bt, skip it */ if (coex_stat->bt_info_c2h[rsp_source][1] == coex_stat->bt_info_lb2 && coex_stat->bt_info_c2h[rsp_source][2] == coex_stat->bt_info_lb3 && coex_stat->bt_info_c2h[rsp_source][3] == coex_stat->bt_info_hb0 && coex_stat->bt_info_c2h[rsp_source][4] == coex_stat->bt_info_hb1 && coex_stat->bt_info_c2h[rsp_source][5] == coex_stat->bt_info_hb2 && - coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3) + coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Return because Btinfo duplicate!!\n"); return; + } coex_stat->bt_info_lb2 = coex_stat->bt_info_c2h[rsp_source][1]; coex_stat->bt_info_lb3 = coex_stat->bt_info_c2h[rsp_source][2]; @@ -2388,6 +2995,40 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) 4 * HZ); } coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3)); + if (chip->ble_hid_profile_support) { + if (coex_stat->bt_info_lb2 & BIT(5)) { + if (coex_stat->bt_info_hb1 & BIT(0)) { + /*BLE HID*/ + coex_stat->bt_ble_hid_exist = true; + } else { + coex_stat->bt_ble_hid_exist = false; + } + coex_stat->bt_ble_exist = false; + } else if (coex_stat->bt_info_hb1 & BIT(0)) { + /*RCU*/ + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = true; + } else { + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = false; + } + } else { + if (coex_stat->bt_info_hb1 & BIT(0)) { + if (coex_stat->bt_hid_slot == 1 && + coex_stat->hi_pri_rx + 100 < coex_stat->hi_pri_tx && + coex_stat->hi_pri_rx < 100) { + coex_stat->bt_ble_hid_exist = true; + coex_stat->bt_ble_exist = false; + } else { + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = true; + } + } else { + coex_stat->bt_ble_hid_exist = false; + coex_stat->bt_ble_exist = false; + } + } + coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf; if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1) coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]++; @@ -2398,22 +3039,13 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) coex_stat->cnt_bt[COEX_CNT_BT_INQ]++; coex_stat->bt_page = ((coex_stat->bt_info_lb3 & BIT(7)) == BIT(7)); - if (coex_stat->bt_page) { + if (coex_stat->bt_page) coex_stat->cnt_bt[COEX_CNT_BT_PAGE]++; - if (coex_stat->wl_linkscan_proc || - coex_stat->wl_hi_pri_task1 || - coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy) - rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true); - else - rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); - } else { - rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false); - } /* unit: % (value-100 to translate to unit: dBm in coex info) */ if (chip->bt_rssi_type == COEX_BTRSSI_RATIO) { coex_stat->bt_rssi = coex_stat->bt_info_hb0 * 2 + 10; - } else { /* original unit: dbm -> unit: % -> value-100 in coex info */ + } else { if (coex_stat->bt_info_hb0 <= 127) coex_stat->bt_rssi = 100; else if (256 - 
coex_stat->bt_info_hb0 <= 100) @@ -2422,7 +3054,6 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) coex_stat->bt_rssi = 0; } - coex_stat->bt_ble_exist = ((coex_stat->bt_info_hb1 & BIT(0)) == BIT(0)); if (coex_stat->bt_info_hb1 & BIT(1)) coex_stat->cnt_bt[COEX_CNT_BT_REINIT]++; @@ -2432,11 +3063,14 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) if (coex_stat->bt_reenable) bt_relink_time = 6 * HZ; else - bt_relink_time = 2 * HZ; + bt_relink_time = 1 * HZ; ieee80211_queue_delayed_work(rtwdev->hw, &coex->bt_relink_work, bt_relink_time); + + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], Re-Link start in BT info!!\n"); } if (coex_stat->bt_info_hb1 & BIT(3)) @@ -2448,8 +3082,21 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH]++; coex_stat->bt_multi_link = ((coex_stat->bt_info_hb1 & BIT(7)) == BIT(7)); + /* for multi_link = 0 but bt pkt remain exist */ + /* Use PS-TDMA to protect WL RX */ + if (!coex_stat->bt_multi_link && coex_stat->bt_multi_link_pre) { + coex_stat->bt_multi_link_remain = true; + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_multi_link_remain_work, + 3 * HZ); + } + coex_stat->bt_multi_link_pre = coex_stat->bt_multi_link; + /* resend wifi info to bt, it is reset and lost the info */ - if ((coex_stat->bt_info_hb1 & BIT(1))) { + if (coex_stat->bt_info_hb1 & BIT(1)) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT Re-init, send wifi BW & Chnl to BT!!\n"); + if (coex_stat->wl_connected) type = COEX_MEDIA_CONNECT; else @@ -2459,8 +3106,11 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) /* if ignore_wlan_act && not set_up_link */ if ((coex_stat->bt_info_hb1 & BIT(3)) && - (!(coex_stat->bt_info_hb1 & BIT(2)))) + (!(coex_stat->bt_info_hb1 & BIT(2)))) { + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n"); rtw_coex_ignore_wlan_act(rtwdev, false); + } coex_stat->bt_opp_exist = ((coex_stat->bt_info_hb2 & BIT(0)) == BIT(0)); if (coex_stat->bt_info_hb2 & BIT(1)) @@ -2472,7 +3122,7 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) coex_stat->bt_hid_pair_num = (coex_stat->bt_info_hb2 & 0xc0) >> 6; if (coex_stat->bt_hid_pair_num > 0 && coex_stat->bt_hid_slot >= 2) coex_stat->bt_418_hid_exist = true; - else if (coex_stat->bt_hid_pair_num == 0) + else if (coex_stat->bt_hid_pair_num == 0 || coex_stat->bt_hid_slot == 1) coex_stat->bt_418_hid_exist = false; if ((coex_stat->bt_info_lb2 & 0x49) == 0x49) @@ -2493,6 +3143,9 @@ void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) u8 val; int i; + rtw_dbg(rtwdev, RTW_DBG_COEX, + "[BTCoex], WiFi Fw Dbg info = %8ph (len = %d)\n", + buf, length); if (WARN(length < 8, "invalid wl info c2h length\n")) return; @@ -2504,7 +3157,7 @@ void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) if (buf[i] >= val) coex_stat->wl_fw_dbg_info[i] = buf[i] - val; else - coex_stat->wl_fw_dbg_info[i] = val - buf[i]; + coex_stat->wl_fw_dbg_info[i] = 255 - val + buf[i]; coex_stat->wl_fw_dbg_info_pre[i] = buf[i]; } @@ -2514,13 +3167,8 @@ void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) rtw_coex_wl_ccklock_detect(rtwdev); } -void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev) +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type) { - struct rtw_coex *coex = &rtwdev->coex; - - if (coex->stop_dm) - return; - rtw_coex_run_coex(rtwdev, 
COEX_RSN_WLSTATUS); } @@ -2585,6 +3233,41 @@ void rtw_coex_bt_remain_work(struct work_struct *work) mutex_unlock(&rtwdev->mutex); } +void rtw_coex_wl_connecting_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_connecting_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_connecting = false; + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], WL connecting stop!!\n"); + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_multi_link_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_multi_link_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_multi_link_remain = false; + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_wl_ccklock_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_ccklock_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_cck_lock = false; + mutex_unlock(&rtwdev->mutex); +} + #ifdef CONFIG_RTW88_DEBUGFS #define INFO_SIZE 80 @@ -2628,6 +3311,81 @@ static const char *rtw_coex_get_reason_string(u8 reason) } } +static u8 rtw_coex_get_table_index(struct rtw_dev *rtwdev, u32 wl_reg_6c0, + u32 wl_reg_6c4) +{ + struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 ans = 0xFF; + u8 n, i; + u32 load_bt_val; + u32 load_wl_val; + bool share_ant = efuse->share_ant; + + if (share_ant) + n = chip->table_sant_num; + else + n = chip->table_nsant_num; + + for (i = 0; i < n; i++) { + if (share_ant) { + load_bt_val = chip->table_sant[i].bt; + load_wl_val = chip->table_sant[i].wl; + } else { + load_bt_val = chip->table_nsant[i].bt; + load_wl_val = chip->table_nsant[i].wl; + } + + if (wl_reg_6c0 == load_bt_val && + wl_reg_6c4 == load_wl_val) { + ans = i; + if (!share_ant) + ans += 100; + break; + } + } + + return ans; +} + +static u8 rtw_coex_get_tdma_index(struct rtw_dev *rtwdev, u8 *tdma_para) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_chip_info *chip = rtwdev->chip; + u8 ans = 0xFF; + u8 n, i, j; + u8 load_cur_tab_val; + bool valid = false; + bool share_ant = efuse->share_ant; + + if (share_ant) + n = chip->tdma_sant_num; + else + n = chip->tdma_nsant_num; + + for (i = 0; i < n; i++) { + valid = false; + for (j = 0; j < 5; j++) { + if (share_ant) + load_cur_tab_val = chip->tdma_sant[i].para[j]; + else + load_cur_tab_val = chip->tdma_nsant[i].para[j]; + + if (*(tdma_para + j) != load_cur_tab_val) + break; + + if (j == 4) + valid = true; + } + if (valid) { + ans = i; + break; + } + } + + return ans; +} + static int rtw_coex_addr_info(struct rtw_dev *rtwdev, const struct rtw_reg_domain *reg, char addr_info[], int n) @@ -2873,6 +3631,19 @@ static void rtw_coex_vif_stat_iter(void *data, u8 *mac, &sta_iter_data); } +#define case_WLINK(src) \ + case COEX_WLINK_##src: return #src + +static const char *rtw_coex_get_wl_coex_mode(u8 coex_wl_link_mode) +{ + switch (coex_wl_link_mode) { + case_WLINK(2G1PORT); + case_WLINK(5G); + default: + return "Unknown"; + } +} + void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) { struct rtw_chip_info *chip = rtwdev->chip; @@ -2894,14 +3665,23 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) score_board_BW = rtw_coex_read_scbd(rtwdev); score_board_WB = 
coex_stat->score_board; - wl_reg_6c0 = rtw_read32(rtwdev, 0x6c0); - wl_reg_6c4 = rtw_read32(rtwdev, 0x6c4); - wl_reg_6c8 = rtw_read32(rtwdev, 0x6c8); - wl_reg_6cc = rtw_read32(rtwdev, 0x6cc); - wl_reg_778 = rtw_read32(rtwdev, 0x778); - bt_hi_pri = rtw_read32(rtwdev, 0x770); - bt_lo_pri = rtw_read32(rtwdev, 0x774); - rtw_write8(rtwdev, 0x76e, 0xc); + wl_reg_6c0 = rtw_read32(rtwdev, REG_BT_COEX_TABLE0); + wl_reg_6c4 = rtw_read32(rtwdev, REG_BT_COEX_TABLE1); + wl_reg_6c8 = rtw_read32(rtwdev, REG_BT_COEX_BRK_TABLE); + wl_reg_6cc = rtw_read32(rtwdev, REG_BT_COEX_TABLE_H); + wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL); + + bt_hi_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS); + bt_lo_pri = rtw_read32(rtwdev, REG_BT_ACT_STATISTICS_1); + rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL, + BIT_R_GRANTALL_WLMASK | BIT_STATIS_BT_EN); + + coex_stat->hi_pri_tx = FIELD_GET(MASKLWORD, bt_hi_pri); + coex_stat->hi_pri_rx = FIELD_GET(MASKHWORD, bt_hi_pri); + + coex_stat->lo_pri_tx = FIELD_GET(MASKLWORD, bt_lo_pri); + coex_stat->lo_pri_rx = FIELD_GET(MASKHWORD, bt_lo_pri); + sys_lte = rtw_read8(rtwdev, 0x73); lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38); bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54); @@ -2919,9 +3699,24 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) coex_stat->bt_mailbox_reply = true; } + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], %s()\n", __func__); seq_printf(m, "**********************************************\n"); seq_printf(m, "\t\tBT Coexist info %x\n", chip->id); seq_printf(m, "**********************************************\n"); + + if (coex->manual_control) { + seq_puts(m, "============[Under Manual Control]============\n"); + seq_puts(m, "==========================================\n"); + + } else if (coex->stop_dm) { + seq_puts(m, "============[Coex is STOPPED]============\n"); + seq_puts(m, "==========================================\n"); + + } else if (coex->freeze) { + seq_puts(m, "============[coex_freeze]============\n"); + seq_puts(m, "==========================================\n"); + } + seq_printf(m, "%-40s = %s/ %d\n", "Mech/ RFE", efuse->share_ant ? "Shared" : "Non-Shared", @@ -2938,15 +3733,17 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) coex_stat->bt_slave ? "Slave" : "Master", coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH], coex_dm->ignore_wl_act); - seq_printf(m, "%-40s = %u.%u/ 0x%x/ %c\n", - "WL FW/ BT FW/ KT", + seq_printf(m, "%-40s = %u.%u/ 0x%x/ 0x%x/ %c\n", + "WL FW/ BT FW/ BT FW Desired/ KT", fw->version, fw->sub_version, - coex_stat->patch_ver, coex_stat->kt_ver + 65); + coex_stat->patch_ver, + chip->wl_fw_desired_ver, coex_stat->kt_ver + 65); seq_printf(m, "%-40s = %u/ %u/ %u/ ch-(%u)\n", "AFH Map", coex_dm->wl_ch_info[0], coex_dm->wl_ch_info[1], coex_dm->wl_ch_info[2], hal->current_channel); + rtw_debugfs_get_simple_phy_info(m); seq_printf(m, "**********************************************\n"); seq_printf(m, "\t\tBT Status\n"); seq_printf(m, "**********************************************\n"); @@ -2965,6 +3762,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) coex_stat->bt_hid_exist ? (coex_stat->bt_ble_exist ? "HID(RCU)," : coex_stat->bt_hid_slot >= 2 ? "HID(4/18)" : + coex_stat->bt_ble_hid_exist ? "HID(BLE)" : "HID(2/18),") : "", coex_stat->bt_pan_exist ? coex_stat->bt_opp_exist ? 
"OPP," : "PAN," : "", @@ -2989,8 +3787,8 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) score_board_WB, score_board_BW); seq_printf(m, "%-40s = %u/%u, %u/%u\n", "Hi-Pri TX/RX, Lo-Pri TX/RX", - bt_hi_pri & 0xffff, bt_hi_pri >> 16, - bt_lo_pri & 0xffff, bt_lo_pri >> 16); + coex_stat->hi_pri_tx, coex_stat->hi_pri_rx, + coex_stat->lo_pri_tx, coex_stat->lo_pri_rx); for (i = 0; i < COEX_BTINFO_SRC_BT_IQK; i++) seq_printf(m, "%-40s = %7ph\n", rtw_coex_get_bt_info_src_string(i), @@ -3015,20 +3813,45 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) vif_iter_data.file = m; rtw_iterate_vifs_atomic(rtwdev, rtw_coex_vif_stat_iter, &vif_iter_data); - seq_printf(m, "**********************************************\n"); - seq_printf(m, "\t\tMechanism\n"); - seq_printf(m, "**********************************************\n"); - seq_printf(m, "%-40s = %5ph (case-%d)\n", - "TDMA", - coex_dm->ps_tdma_para, coex_dm->cur_ps_tdma); - seq_printf(m, "%-40s = %d\n", - "Timer base", coex_stat->tdma_timer_base); - seq_printf(m, "%-40s = %d/ 0x%08x/ 0x%08x/ 0x%08x\n", + if (coex->manual_control) { + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tMechanism (Under Manual)\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %5ph (%d)\n", + "TDMA Now", + coex_dm->fw_tdma_para, + rtw_coex_get_tdma_index(rtwdev, + &coex_dm->fw_tdma_para[0])); + } else { + seq_printf(m, "**********************************************\n"); + seq_printf(m, "\t\tMechanism\n"); + seq_printf(m, "**********************************************\n"); + seq_printf(m, "%-40s = %5ph (case-%d)\n", + "TDMA", + coex_dm->ps_tdma_para, coex_dm->cur_ps_tdma); + } + seq_printf(m, "%-40s = %s/ %s/ %d\n", + "Coex Mode/Free Run/Timer base", + rtw_coex_get_wl_coex_mode(coex_stat->wl_coex_mode), + coex->freerun ? "Yes" : "No", + coex_stat->tdma_timer_base); + seq_printf(m, "%-40s = %d(%d)/ 0x%08x/ 0x%08x/ 0x%08x\n", "Table/ 0x6c0/ 0x6c4/ 0x6c8", - coex_dm->cur_table, wl_reg_6c0, wl_reg_6c4, wl_reg_6c8); - seq_printf(m, "%-40s = 0x%08x/ 0x%08x/ reason (%s)\n", - "0x778/ 0x6cc/ Reason", - wl_reg_778, wl_reg_6cc, rtw_coex_get_reason_string(reason)); + coex_dm->cur_table, + rtw_coex_get_table_index(rtwdev, wl_reg_6c0, wl_reg_6c4), + wl_reg_6c0, wl_reg_6c4, wl_reg_6c8); + seq_printf(m, "%-40s = 0x%08x/ 0x%08x/ %d/ reason (%s)\n", + "0x778/ 0x6cc/ Run Count/ Reason", + wl_reg_778, wl_reg_6cc, + coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN], + rtw_coex_get_reason_string(reason)); + seq_printf(m, "%-40s = %3ph\n", + "AFH Map to BT", + coex_dm->wl_ch_info); + seq_printf(m, "%-40s = %s/ %d\n", + "AntDiv/ BtCtrlLPS/ g_busy", + coex_stat->wl_force_lps_ctrl ? "On" : "Off", + coex_stat->wl_gl_busy); seq_printf(m, "%-40s = %u/ %u/ %u/ %u/ %u\n", "Null All/ Retry/ Ack/ BT Empty/ BT Late", coex_stat->wl_fw_dbg_info[1], coex_stat->wl_fw_dbg_info[2], @@ -3040,6 +3863,12 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) coex_stat->wl_fw_dbg_info[7], coex_stat->wl_slot_extend ? "Yes" : "No", coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]); + seq_printf(m, "%-40s = %d/ %d/ %s/ %d\n", + "WL_TxPw/ BT_TxPw/ WL_Rx/ BT_LNA_Lvl", + coex_dm->cur_wl_pwr_lvl, + coex_dm->cur_bt_pwr_lvl, + coex_dm->cur_wl_rx_low_gain_en ? 
"On" : "Off", + coex_dm->cur_bt_lna_lvl); seq_printf(m, "**********************************************\n"); seq_printf(m, "\t\tHW setting\n"); @@ -3074,5 +3903,22 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) coex_stat->wl_noisy_level); rtw_coex_set_coexinfo_hw(rtwdev, m); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", + "EVM A/ EVM B/ SNR A/ SNR B", + -dm_info->rx_evm_dbm[RF_PATH_A], + -dm_info->rx_evm_dbm[RF_PATH_B], + -dm_info->rx_snr[RF_PATH_A], + -dm_info->rx_snr[RF_PATH_B]); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", + "CCK-CCA/CCK-FA/OFDM-CCA/OFDM-FA", + dm_info->cck_cca_cnt, dm_info->cck_fa_cnt, + dm_info->ofdm_cca_cnt, dm_info->ofdm_fa_cnt); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", "CRC OK CCK/11g/11n/11ac", + dm_info->cck_ok_cnt, dm_info->ofdm_ok_cnt, + dm_info->ht_ok_cnt, dm_info->vht_ok_cnt); + seq_printf(m, "%-40s = %d/ %d/ %d/ %d\n", "CRC Err CCK/11g/11n/11ac", + dm_info->cck_err_cnt, dm_info->ofdm_err_cnt, + dm_info->ht_err_cnt, dm_info->vht_err_cnt); + } #endif /* CONFIG_RTW88_DEBUGFS */ diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h index 44720fdfc053..8ab9852ec9ed 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.h +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -5,12 +5,7 @@ #ifndef __RTW_COEX_H__ #define __RTW_COEX_H__ -/* BT profile map bit definition */ -#define BPM_HFP BIT(0) -#define BPM_HID BIT(1) -#define BPM_A2DP BIT(2) -#define BPM_PAN BIT(3) - +#define COEX_CCK_2 0x1 #define COEX_RESP_ACK_BY_WL_FW 0x1 #define COEX_REQUEST_TIMEOUT msecs_to_jiffies(10) @@ -27,10 +22,19 @@ #define COEX_H2C69_TDMA_SLOT 0xb #define PARA1_H2C69_TDMA_4SLOT 0xc1 #define PARA1_H2C69_TDMA_2SLOT 0x1 +#define PARA1_H2C69_TBTT_TIMES GENMASK(5, 0) +#define PARA1_H2C69_TBTT_DIV100 BIT(7) + +#define COEX_H2C69_TOGGLE_TABLE_A 0xd +#define COEX_H2C69_TOGGLE_TABLE_B 0x7 #define TDMA_4SLOT BIT(8) +#define TDMA_TIMER_TYPE_2SLOT 0 +#define TDMA_TIMER_TYPE_4SLOT 3 + #define COEX_RSSI_STEP 4 + #define COEX_RSSI_HIGH(rssi) \ ({ typeof(rssi) __rssi__ = rssi; \ (__rssi__ == COEX_RSSI_STATE_HIGH || \ @@ -146,6 +150,25 @@ enum coex_algorithm { COEX_ALGO_MAX }; +enum coex_bt_profile { + BPM_NOPROFILE = 0, + BPM_HFP = BIT(0), + BPM_HID = BIT(1), + BPM_A2DP = BIT(2), + BPM_PAN = BIT(3), + BPM_HID_HFP = BPM_HID | BPM_HFP, + BPM_A2DP_HFP = BPM_A2DP | BPM_HFP, + BPM_A2DP_HID = BPM_A2DP | BPM_HID, + BPM_A2DP_HID_HFP = BPM_A2DP | BPM_HID | BPM_HFP, + BPM_PAN_HFP = BPM_PAN | BPM_HFP, + BPM_PAN_HID = BPM_PAN | BPM_HID, + BPM_PAN_HID_HFP = BPM_PAN | BPM_HID | BPM_HFP, + BPM_PAN_A2DP = BPM_PAN | BPM_A2DP, + BPM_PAN_A2DP_HFP = BPM_PAN | BPM_A2DP | BPM_HFP, + BPM_PAN_A2DP_HID = BPM_PAN | BPM_A2DP | BPM_HID, + BPM_PAN_A2DP_HID_HFP = BPM_PAN | BPM_A2DP | BPM_HID | BPM_HFP, +}; + enum coex_wl_link_mode { COEX_WLINK_2G1PORT = 0x0, COEX_WLINK_5G = 0x3, @@ -365,19 +388,21 @@ void rtw_coex_bt_reenable_work(struct work_struct *work); void rtw_coex_defreeze_work(struct work_struct *work); void rtw_coex_wl_remain_work(struct work_struct *work); void rtw_coex_bt_remain_work(struct work_struct *work); +void rtw_coex_wl_connecting_work(struct work_struct *work); +void rtw_coex_bt_multi_link_remain_work(struct work_struct *work); +void rtw_coex_wl_ccklock_work(struct work_struct *work); void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type); void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type); void 
rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type); -void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 action); -void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 status); -void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 len); +void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type); +void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length); void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length); void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type); -void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev); - +void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev, u32 type); void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m); #endif diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index efbba9caef3b..19fc2d8bf3e9 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -619,6 +619,29 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) return 0; } +void rtw_debugfs_get_simple_phy_info(struct seq_file *m) +{ + struct rtw_debugfs_priv *debugfs_priv = m->private; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_traffic_stats *stats = &rtwdev->stats; + + seq_printf(m, "%-40s = %ddBm/ %d\n", "RSSI/ STA Channel", + dm_info->rssi[RF_PATH_A] - 100, hal->current_channel); + + seq_printf(m, "TP {Tx, Rx} = {%u, %u}Mbps\n", + stats->tx_throughput, stats->rx_throughput); + + seq_puts(m, "[Tx Rate] = "); + rtw_print_rate(m, dm_info->tx_rate); + seq_printf(m, "(0x%x)\n", dm_info->tx_rate); + + seq_puts(m, "[Rx Rate] = "); + rtw_print_rate(m, dm_info->curr_rx_rate); + seq_printf(m, "(0x%x)\n", dm_info->curr_rx_rate); +} + static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; @@ -777,7 +800,7 @@ static ssize_t rtw_debugfs_set_coex_enable(struct file *filp, } mutex_lock(&rtwdev->mutex); - coex->stop_dm = enable == 0; + coex->manual_control = enable == 0; mutex_unlock(&rtwdev->mutex); return count; @@ -790,7 +813,7 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v) struct rtw_coex *coex = &rtwdev->coex; seq_printf(m, "coex mechanism %s\n", - coex->stop_dm ? "disabled" : "enabled"); + coex->manual_control ? 
"disabled" : "enabled"); return 0; } diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h index a0f36f29b4a6..e16e0da26e77 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.h +++ b/drivers/net/wireless/realtek/rtw88/debug.h @@ -26,6 +26,7 @@ enum rtw_debug_mask { #ifdef CONFIG_RTW88_DEBUGFS void rtw_debugfs_init(struct rtw_dev *rtwdev); +void rtw_debugfs_get_simple_phy_info(struct seq_file *m); #else diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index b2fd87834f23..6649b84f6b1e 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -13,6 +13,7 @@ #include "debug.h" #include "util.h" #include "wow.h" +#include "ps.h" static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb) @@ -183,6 +184,9 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset, case C2H_BT_MP_INFO: rtw_coex_info_response(rtwdev, skb); break; + case C2H_WLAN_RFON: + complete(&rtwdev->lps_leave_check); + break; default: /* pass offset for further operation */ *((u32 *)skb->cb) = pkt_offset; @@ -628,7 +632,7 @@ void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable) SET_NLO_FUN_EN(h2c_pkt, enable); if (enable) { - if (rtw_fw_lps_deep_mode) + if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE) SET_NLO_PS_32K(h2c_pkt, enable); SET_NLO_IGNORE_SECURITY(h2c_pkt, enable); SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo); @@ -1473,7 +1477,7 @@ static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev, case RTW_FW_FIFO_SEL_RX: if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel]) return false; - /*fall through*/ + fallthrough; default: return true; } diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 08644540d259..39c905c1b1d8 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -31,6 +31,7 @@ enum rtw_c2h_cmd_id { C2H_RA_RPT = 0x0c, C2H_HW_FEATURE_REPORT = 0x19, C2H_WLAN_INFO = 0x27, + C2H_WLAN_RFON = 0x32, C2H_HW_FEATURE_DUMP = 0xfd, C2H_HALMAC = 0xff, }; @@ -72,6 +73,14 @@ enum rtw_fw_rf_type { FW_RF_MAX_TYPE = 0xF, }; +enum rtw_fw_feature { + FW_FEATURE_SIG = BIT(0), + FW_FEATURE_LPS_C2H = BIT(1), + FW_FEATURE_LCLK = BIT(2), + FW_FEATURE_PG = BIT(3), + FW_FEATURE_MAX = BIT(31), +}; + struct rtw_coex_info_req { u8 seq; u8 op_code; @@ -177,7 +186,7 @@ struct rtw_fw_hdr { u8 subversion; u8 subindex; __le32 rsvd; /* 0x08 */ - __le32 rsvd2; /* 0x0C */ + __le32 feature; /* 0x0C */ u8 month; /* 0x10 */ u8 day; u8 hour; diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index c92fba2fa480..1f1b639cd124 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -351,6 +351,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, { struct rtw_dev *rtwdev = hw->priv; struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_stat *coex_stat = &coex->stat; u32 config = 0; mutex_lock(&rtwdev->mutex); @@ -381,6 +383,11 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, config |= PORT_SET_BSSID; } + if (changed & BSS_CHANGED_BEACON_INT) { + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + coex_stat->wl_beacon_interval = conf->beacon_int; + } + if (changed & BSS_CHANGED_BEACON) rtw_fw_download_rsvd_page(rtwdev); @@ -519,7 +526,7 @@ static int 
rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } /* download new cam settings for PG to backup */ - if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG) + if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_PG) rtw_fw_download_rsvd_page(rtwdev); out: diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 565efd880624..e7c1ae454524 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -16,17 +16,17 @@ #include "debug.h" #include "bf.h" -unsigned int rtw_fw_lps_deep_mode; -EXPORT_SYMBOL(rtw_fw_lps_deep_mode); +bool rtw_disable_lps_deep_mode; +EXPORT_SYMBOL(rtw_disable_lps_deep_mode); bool rtw_bf_support = true; unsigned int rtw_debug_mask; EXPORT_SYMBOL(rtw_debug_mask); -module_param_named(lps_deep_mode, rtw_fw_lps_deep_mode, uint, 0644); +module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644); module_param_named(support_bf, rtw_bf_support, bool, 0644); module_param_named(debug_mask, rtw_debug_mask, uint, 0644); -MODULE_PARM_DESC(lps_deep_mode, "Deeper PS mode. If 0, deep PS is disabled"); +MODULE_PARM_DESC(disable_lps_deep, "Set Y to disable Deep PS"); MODULE_PARM_DESC(support_bf, "Set Y to enable beamformee support"); MODULE_PARM_DESC(debug_mask, "Debugging mask"); @@ -67,6 +67,7 @@ static struct ieee80211_channel rtw_channeltable_5g[] = { {.center_freq = 5660, .hw_value = 132,}, {.center_freq = 5680, .hw_value = 136,}, {.center_freq = 5700, .hw_value = 140,}, + {.center_freq = 5720, .hw_value = 144,}, {.center_freq = 5745, .hw_value = 149,}, {.center_freq = 5765, .hw_value = 153,}, {.center_freq = 5785, .hw_value = 157,}, @@ -196,7 +197,7 @@ static void rtw_watch_dog_work(struct work_struct *work) clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags)) - rtw_coex_wl_status_change_notify(rtwdev); + rtw_coex_wl_status_change_notify(rtwdev, 0); if (stats->tx_cnt > RTW_LPS_THRESHOLD || stats->rx_cnt > RTW_LPS_THRESHOLD) @@ -1023,6 +1024,26 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) return 0; } +static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + struct rtw_chip_info *chip = rtwdev->chip; + + if (rtw_disable_lps_deep_mode || !chip->lps_deep_mode_supported || + !fw->feature) + return LPS_DEEP_MODE_NONE; + + if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_PG)) && + (fw->feature & FW_FEATURE_PG)) + return LPS_DEEP_MODE_PG; + + if ((chip->lps_deep_mode_supported & BIT(LPS_DEEP_MODE_LCLK)) && + (fw->feature & FW_FEATURE_LCLK)) + return LPS_DEEP_MODE_LCLK; + + return LPS_DEEP_MODE_NONE; +} + static int rtw_power_on(struct rtw_dev *rtwdev) { struct rtw_chip_info *chip = rtwdev->chip; @@ -1097,6 +1118,9 @@ int rtw_core_start(struct rtw_dev *rtwdev) rtw_sec_enable_sec_engine(rtwdev); + rtwdev->lps_conf.deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->fw); + rtwdev->lps_conf.wow_deep_mode = rtw_update_lps_deep_mode(rtwdev, &rtwdev->wow_fw); + /* rcr reset after powered on */ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr); @@ -1130,6 +1154,9 @@ void rtw_core_stop(struct rtw_dev *rtwdev) cancel_delayed_work_sync(&coex->defreeze_work); cancel_delayed_work_sync(&coex->wl_remain_work); cancel_delayed_work_sync(&coex->bt_remain_work); + cancel_delayed_work_sync(&coex->wl_connecting_work); + cancel_delayed_work_sync(&coex->bt_multi_link_remain_work); + cancel_delayed_work_sync(&coex->wl_ccklock_work); mutex_lock(&rtwdev->mutex); 
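The rtw_update_lps_deep_mode() helper introduced above derives the effective deep power-save mode from the intersection of what the chip supports and what the loaded firmware advertises, preferring PG (power gating) over LCLK (low clock). A minimal standalone sketch of that precedence under simplified types — pick_deep_mode(), chip_modes and user_disabled are illustrative names, not driver symbols; the enum and feature-bit values mirror the main.h/fw.h hunks:

#include <linux/bits.h>
#include <linux/types.h>

enum lps_deep_mode {
	LPS_DEEP_MODE_NONE = 0,
	LPS_DEEP_MODE_LCLK = 1,
	LPS_DEEP_MODE_PG = 2,
};

#define FW_FEATURE_LCLK BIT(2)
#define FW_FEATURE_PG BIT(3)

/* chip_modes is a bitmap of BIT(LPS_DEEP_MODE_*) the chip supports;
 * fw_feature is the validated feature word from the firmware header */
static enum lps_deep_mode pick_deep_mode(u8 chip_modes, u32 fw_feature,
					 bool user_disabled)
{
	if (user_disabled || !fw_feature)
		return LPS_DEEP_MODE_NONE;

	/* deepest supported mode wins: PG saves more power than LCLK */
	if ((chip_modes & BIT(LPS_DEEP_MODE_PG)) &&
	    (fw_feature & FW_FEATURE_PG))
		return LPS_DEEP_MODE_PG;

	if ((chip_modes & BIT(LPS_DEEP_MODE_LCLK)) &&
	    (fw_feature & FW_FEATURE_LCLK))
		return LPS_DEEP_MODE_LCLK;

	return LPS_DEEP_MODE_NONE;
}

Note why the series adds a separate wow_deep_mode alongside deep_mode: the WoWLAN firmware is a distinct image with its own feature word, so the same selection runs once for rtwdev->fw and once for rtwdev->wow_fw.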
@@ -1259,6 +1286,17 @@ static void rtw_unset_supported_band(struct ieee80211_hw *hw, kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]); } +static void __update_firmware_feature(struct rtw_dev *rtwdev, + struct rtw_fw_state *fw) +{ + u32 feature; + const struct rtw_fw_hdr *fw_hdr = + (const struct rtw_fw_hdr *)fw->firmware->data; + + feature = le32_to_cpu(fw_hdr->feature); + fw->feature = feature & FW_FEATURE_SIG ? feature : 0; +} + static void __update_firmware_info(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) { @@ -1269,6 +1307,8 @@ static void __update_firmware_info(struct rtw_dev *rtwdev, fw->version = le16_to_cpu(fw_hdr->version); fw->sub_version = fw_hdr->subversion; fw->sub_index = fw_hdr->subindex; + + __update_firmware_feature(rtwdev, fw); } static void __update_firmware_info_legacy(struct rtw_dev *rtwdev, @@ -1622,6 +1662,10 @@ int rtw_core_init(struct rtw_dev *rtwdev) INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); + INIT_DELAYED_WORK(&coex->wl_connecting_work, rtw_coex_wl_connecting_work); + INIT_DELAYED_WORK(&coex->bt_multi_link_remain_work, + rtw_coex_bt_multi_link_remain_work); + INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work); INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); @@ -1639,14 +1683,11 @@ int rtw_core_init(struct rtw_dev *rtwdev) mutex_init(&rtwdev->hal.tx_power_mutex); init_waitqueue_head(&rtwdev->coex.wait); + init_completion(&rtwdev->lps_leave_check); rtwdev->sec.total_cam_num = 32; rtwdev->hal.current_channel = 1; set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map); - if (!(BIT(rtw_fw_lps_deep_mode) & chip->lps_deep_mode_supported)) - rtwdev->lps_conf.deep_mode = LPS_DEEP_MODE_NONE; - else - rtwdev->lps_conf.deep_mode = rtw_fw_lps_deep_mode; rtw_stats_init(rtwdev); @@ -1671,6 +1712,7 @@ int rtw_core_init(struct rtw_dev *rtwdev) return ret; } } + return 0; } EXPORT_SYMBOL(rtw_core_init); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index ffb02e614217..9a318dfd04f9 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -37,7 +37,7 @@ #define RTW_TP_SHIFT 18 /* bytes/2s --> Mbps */ extern bool rtw_bf_support; -extern unsigned int rtw_fw_lps_deep_mode; +extern bool rtw_disable_lps_deep_mode; extern unsigned int rtw_debug_mask; extern const struct ieee80211_ops rtw_ops; @@ -664,6 +664,7 @@ enum rtw_pwr_state { struct rtw_lps_conf { enum rtw_lps_mode mode; enum rtw_lps_deep_mode deep_mode; + enum rtw_lps_deep_mode wow_deep_mode; enum rtw_pwr_state state; u8 awake_interval; u8 rlbm; @@ -1173,6 +1174,7 @@ struct rtw_chip_info { u8 bt_desired_ver; bool scbd_support; bool new_scbd10_def; /* true: fix 2M(8822c) */ + bool ble_hid_profile_support; u8 pstdma_type; /* 0: LPSoff, 1:LPSon */ u8 bt_rssi_type; u8 ant_isolation; @@ -1197,6 +1199,7 @@ struct rtw_chip_info { const struct coex_5g_afh_map *afh_5g; const struct rtw_hw_reg *btg_reg; const struct rtw_reg_domain *coex_info_hw_regs; + u32 wl_fw_desired_ver; }; enum rtw_coex_bt_state_cnt { @@ -1218,6 +1221,7 @@ enum rtw_coex_bt_state_cnt { }; enum rtw_coex_wl_state_cnt { + COEX_CNT_WL_SCANAP, COEX_CNT_WL_CONNPKT, COEX_CNT_WL_COEXRUN, COEX_CNT_WL_NOISY0, @@ -1240,6 +1244,8 @@ struct rtw_coex_rfe { bool wlg_at_btg; }; +#define COEX_WL_TDMA_PARA_LENGTH 5 + struct 
rtw_coex_dm { bool cur_ps_tdma_on; bool cur_wl_rx_low_gain_en; @@ -1259,6 +1265,7 @@ struct rtw_coex_dm { u32 cur_ant_pos_type; u32 cur_switch_status; u32 setting_tdma; + u8 fw_tdma_para[COEX_WL_TDMA_PARA_LENGTH]; }; #define COEX_BTINFO_SRC_WL_FW 0x0 @@ -1266,7 +1273,8 @@ struct rtw_coex_dm { #define COEX_BTINFO_SRC_BT_ACT 0x2 #define COEX_BTINFO_SRC_BT_IQK 0x3 #define COEX_BTINFO_SRC_BT_SCBD 0x4 -#define COEX_BTINFO_SRC_MAX 0x5 +#define COEX_BTINFO_SRC_H2C60 0x5 +#define COEX_BTINFO_SRC_MAX 0x6 #define COEX_INFO_FTP BIT(7) #define COEX_INFO_A2DP BIT(6) @@ -1277,6 +1285,7 @@ struct rtw_coex_dm { #define COEX_INFO_SCO_ESCO BIT(1) #define COEX_INFO_CONNECTION BIT(0) #define COEX_BTINFO_LENGTH_MAX 10 +#define COEX_BTINFO_LENGTH 7 struct rtw_coex_stat { bool bt_disabled; @@ -1298,6 +1307,8 @@ struct rtw_coex_stat { bool bt_fix_2M; bool bt_setup_link; bool bt_multi_link; + bool bt_multi_link_pre; + bool bt_multi_link_remain; bool bt_a2dp_sink; bool bt_a2dp_active; bool bt_reenable; @@ -1305,6 +1316,7 @@ struct rtw_coex_stat { bool bt_init_scan; bool bt_slave; bool bt_418_hid_exist; + bool bt_ble_hid_exist; bool bt_mailbox_reply; bool wl_under_lps; @@ -1322,9 +1334,16 @@ struct rtw_coex_stat { bool wl_cck_lock; bool wl_cck_lock_pre; bool wl_cck_lock_ever; + bool wl_connecting; + bool wl_slot_toggle; + bool wl_slot_toggle_change; /* if toggle to no-toggle */ u32 bt_supported_version; u32 bt_supported_feature; + u32 hi_pri_tx; + u32 hi_pri_rx; + u32 lo_pri_tx; + u32 lo_pri_rx; u32 patch_ver; u16 bt_reg_vendor_ae; u16 bt_reg_vendor_ac; @@ -1346,13 +1365,21 @@ struct rtw_coex_stat { u8 bt_a2dp_bitpool; u8 bt_iqk_state; + u16 wl_beacon_interval; u8 wl_noisy_level; u8 wl_fw_dbg_info[10]; u8 wl_fw_dbg_info_pre[10]; + u8 wl_rx_rate; + u8 wl_tx_rate; + u8 wl_rts_rx_rate; u8 wl_coex_mode; + u8 wl_iot_peer; u8 ampdu_max_time; u8 wl_tput_dir; + u8 wl_toggle_para[6]; + u8 wl_toggle_interval; + u16 score_board; u16 retry_limit; @@ -1362,6 +1389,9 @@ struct rtw_coex_stat { /* counters to record wifi states */ u32 cnt_wl[COEX_CNT_WL_MAX]; + /* counters to record bt c2h data */ + u32 cnt_bt_info_c2h[COEX_BTINFO_SRC_MAX]; + u32 darfrc; u32 darfrch; }; @@ -1377,6 +1407,7 @@ struct rtw_coex { bool freeze; bool freerun; bool wl_rf_off; + bool manual_control; struct rtw_coex_stat stat; struct rtw_coex_dm dm; @@ -1387,6 +1418,10 @@ struct rtw_coex { struct delayed_work defreeze_work; struct delayed_work wl_remain_work; struct delayed_work bt_remain_work; + struct delayed_work wl_connecting_work; + struct delayed_work bt_multi_link_remain_work; + struct delayed_work wl_ccklock_work; + }; #define DPK_RF_REG_NUM 7 @@ -1633,6 +1668,7 @@ struct rtw_fw_state { u8 sub_index; u16 h2c_version; u8 prev_dump_seq; + u32 feature; }; struct rtw_hal { @@ -1739,6 +1775,7 @@ struct rtw_dev { /* lps power state & handler work */ struct rtw_lps_conf lps_conf; bool ps_enabled; + struct completion lps_leave_check; struct dentry *debugfs; diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index ca17aa9cf7dc..7cdefe229824 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -5,6 +5,8 @@ #ifndef __RTK_PCI_H_ #define __RTK_PCI_H_ +#include "main.h" + #define RTK_DEFAULT_TX_DESC_NUM 128 #define RTK_BEQ_TX_DESC_NUM 256 @@ -212,6 +214,12 @@ struct rtw_pci { void __iomem *mmap; }; +extern const struct dev_pm_ops rtw_pm_ops; + +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void 
rtw_pci_shutdown(struct pci_dev *pdev); + static inline u32 max_num_of_tx_queue(u8 queue) { u32 max_num; diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 5cd9cc42648e..d44960cd940c 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -541,6 +541,12 @@ static void rtw_phy_cck_pd(struct rtw_dev *rtwdev) else dm_info->cck_fa_avg = (dm_info->cck_fa_avg * 3 + cck_fa) >> 2; + rtw_dbg(rtwdev, RTW_DBG_PHY, "IGI=0x%x, rssi_min=%d, cck_fa=%d\n", + dm_info->igi_history[0], dm_info->min_rssi, + dm_info->fa_history[0]); + rtw_dbg(rtwdev, RTW_DBG_PHY, "cck_fa_avg=%d, cck_pd_default=%d\n", + dm_info->cck_fa_avg, dm_info->cck_pd_default); + level = rtw_phy_cck_pd_lv(rtwdev); if (level >= CCK_PD_LV_MAX) diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c index 7a189a9926fe..3bead34c3d10 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.c +++ b/drivers/net/wireless/realtek/rtw88/ps.c @@ -68,48 +68,39 @@ int rtw_leave_ips(struct rtw_dev *rtwdev) void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter) { u8 request, confirm, polling; - u8 polling_cnt; - u8 retry_cnt = 0; - - for (retry_cnt = 0; retry_cnt < 3; retry_cnt++) { - request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr); - confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr); - - /* toggle to request power mode, others remain 0 */ - request ^= request | BIT_RPWM_TOGGLE; - if (!enter) { - request |= POWER_MODE_ACK; - } else { - request |= POWER_MODE_LCLK; - if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG) - request |= POWER_MODE_PG; - } - - rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request); - - if (enter) - return; - - /* check confirm power mode has left power save state */ - for (polling_cnt = 0; polling_cnt < 50; polling_cnt++) { - polling = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr); - if ((polling ^ confirm) & BIT_RPWM_TOGGLE) - return; - udelay(100); - } - - /* in case of fw/hw missed the request, retry */ - rtw_warn(rtwdev, "failed to leave deep PS, retry=%d\n", - retry_cnt); + int ret; + + request = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr); + confirm = rtw_read8(rtwdev, rtwdev->hci.cpwm_addr); + + /* toggle to request power mode, others remain 0 */ + request ^= request | BIT_RPWM_TOGGLE; + if (enter) { + request |= POWER_MODE_LCLK; + if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_PG) + request |= POWER_MODE_PG; } + /* Each request requires an ack from firmware */ + request |= POWER_MODE_ACK; - /* Hit here means that driver failed to change hardware power mode to - * active state after retry 3 times. If the power state is locked at - * Deep sleep, most of the hardware circuits is not working, even - * register read/write. It should be treated as fatal error and - * requires an entire analysis about the firmware/hardware - */ - WARN(1, "Hardware power state locked\n"); + rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, request); + + /* Check that firmware got the power request and acked via the cpwm register */ + ret = read_poll_timeout_atomic(rtw_read8, polling, + (polling ^ confirm) & BIT_RPWM_TOGGLE, + 100, 15000, true, rtwdev, + rtwdev->hci.cpwm_addr); + if (ret) { + /* Hit here means that driver failed to get an ack from firmware. + * The reason could be that hardware is locked at Deep sleep, + * so most of the hardware circuits are not working, even + * register read/write; or firmware is locked in some state and + * cannot get the request.
It should be treated as fatal error + * and requires an entire analysis about the firmware/hardware. + */ + WARN(1, "firmware failed to ack driver for %s Deep Power mode\n", + enter ? "entering" : "leaving"); + } } EXPORT_SYMBOL(rtw_power_mode_change); @@ -118,7 +109,7 @@ static void __rtw_leave_lps_deep(struct rtw_dev *rtwdev) rtw_hci_deep_ps(rtwdev, false); } -static void rtw_fw_leave_lps_state_check(struct rtw_dev *rtwdev) +static int __rtw_fw_leave_lps_check_reg(struct rtw_dev *rtwdev) { int i; @@ -136,12 +127,53 @@ static void rtw_fw_leave_lps_state_check(struct rtw_dev *rtwdev) */ for (i = 0 ; i < LEAVE_LPS_TRY_CNT; i++) { if (rtw_read32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN) == 0) - return; + return 0; msleep(20); } - rtw_write32_mask(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN, 0); - rtw_warn(rtwdev, "firmware failed to restore hardware setting\n"); + return -EBUSY; +} + +static int __rtw_fw_leave_lps_check_c2h(struct rtw_dev *rtwdev) +{ + if (wait_for_completion_timeout(&rtwdev->lps_leave_check, + LEAVE_LPS_TIMEOUT)) + return 0; + return -EBUSY; +} + +static void rtw_fw_leave_lps_check(struct rtw_dev *rtwdev) +{ + bool ret = false; + struct rtw_fw_state *fw; + + if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) + fw = &rtwdev->wow_fw; + else + fw = &rtwdev->fw; + + if (fw->feature & FW_FEATURE_LPS_C2H) + ret = __rtw_fw_leave_lps_check_c2h(rtwdev); + else + ret = __rtw_fw_leave_lps_check_reg(rtwdev); + + if (ret) { + rtw_write32_clr(rtwdev, REG_TCR, BIT_PWRMGT_HWDATA_EN); + rtw_warn(rtwdev, "firmware failed to leave lps state\n"); + } +} + +static void rtw_fw_leave_lps_check_prepare(struct rtw_dev *rtwdev) +{ + struct rtw_fw_state *fw; + + if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) + fw = &rtwdev->wow_fw; + else + fw = &rtwdev->fw; + + if (fw->feature & FW_FEATURE_LPS_C2H) + reinit_completion(&rtwdev->lps_leave_check); } static void rtw_leave_lps_core(struct rtw_dev *rtwdev) @@ -154,17 +186,26 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev) conf->smart_ps = 0; rtw_hci_link_ps(rtwdev, false); + rtw_fw_leave_lps_check_prepare(rtwdev); rtw_fw_set_pwr_mode(rtwdev); - rtw_fw_leave_lps_state_check(rtwdev); + rtw_fw_leave_lps_check(rtwdev); clear_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags); rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE); } +enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev) +{ + if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) + return rtwdev->lps_conf.wow_deep_mode; + else + return rtwdev->lps_conf.deep_mode; +} + static void __rtw_enter_lps_deep(struct rtw_dev *rtwdev) { - if (rtwdev->lps_conf.deep_mode == LPS_DEEP_MODE_NONE) + if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_NONE) return; if (!test_bit(RTW_FLAG_LEISURE_PS, rtwdev->flags)) { @@ -173,7 +214,7 @@ static void __rtw_enter_lps_deep(struct rtw_dev *rtwdev) return; } - if (rtw_fw_lps_deep_mode == LPS_DEEP_MODE_PG) + if (rtw_get_lps_deep_mode(rtwdev) == LPS_DEEP_MODE_PG) rtw_fw_set_pg_info(rtwdev); rtw_hci_deep_ps(rtwdev, true); diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h index 19afceca7d0e..7819391c8663 100644 --- a/drivers/net/wireless/realtek/rtw88/ps.h +++ b/drivers/net/wireless/realtek/rtw88/ps.h @@ -12,6 +12,7 @@ #define POWER_MODE_LCLK BIT(0) #define LEAVE_LPS_TRY_CNT 5 +#define LEAVE_LPS_TIMEOUT msecs_to_jiffies(100) int rtw_enter_ips(struct rtw_dev *rtwdev); int rtw_leave_ips(struct rtw_dev *rtwdev); @@ -20,5 +21,5 @@ void rtw_power_mode_change(struct rtw_dev *rtwdev, bool enter); void rtw_enter_lps(struct rtw_dev *rtwdev, u8 
port_id); void rtw_leave_lps(struct rtw_dev *rtwdev); void rtw_leave_lps_deep(struct rtw_dev *rtwdev); - +enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 86b94c008a27..cf9a3b674d30 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -60,7 +60,7 @@ #define REG_GPIO_MUXCFG 0x0040 #define BIT_FSPI_EN BIT(19) #define BIT_EN_SIC BIT(12) -#define BIT_BT_AOD_GPIO3 BIT(9) + #define BIT_PO_BT_PTA_PINS BIT(9) #define BIT_BT_PTA_EN BIT(5) #define BIT_WLRFE_4_5_EN BIT(2) @@ -463,11 +463,17 @@ #define REG_BT_COEX_TABLE_H3 0x06CF #define REG_BBPSF_CTRL 0x06DC -#define REG_BT_COEX_V2 0x0763 -#define BIT_GNT_BT_POLARITY BIT(4) +#define REG_BT_COEX_V2 0x0762 +#define BIT_GNT_BT_POLARITY BIT(12) #define BIT_LTE_COEX_EN BIT(7) +#define REG_BT_COEX_ENH_INTR_CTRL 0x76E +#define BIT_R_GRANTALL_WLMASK BIT(3) +#define BIT_STATIS_BT_EN BIT(2) +#define REG_BT_ACT_STATISTICS 0x0770 +#define REG_BT_ACT_STATISTICS_1 0x0774 #define REG_BT_STAT_CTRL 0x0778 #define REG_BT_TDMA_TIME 0x0790 +#define BIT_MASK_SAMPLE_RATE GENMASK(5, 0) #define REG_LTR_IDLE_LATENCY 0x0798 #define REG_LTR_ACTIVE_LATENCY 0x079C #define REG_LTR_CTRL_BASIC 0x07A4 @@ -483,6 +489,8 @@ #define REG_2ND_CCA_CTRL 0x0976 #define REG_CCK0_FAREPORT 0xa2c +#define BIT_CCK0_2RX BIT(18) +#define BIT_CCK0_MRC BIT(22) #define REG_DIS_DPD 0x0a70 #define DIS_DPD_MASK GENMASK(9, 0) @@ -618,6 +626,9 @@ #define REG_ANAPAR 0x1c30 #define BIT_ANAPAR_BTPS BIT(22) #define REG_RSTB_SEL 0x1c38 +#define BIT_DAC_OFF_ENABLE BIT(4) +#define BIT_PI_IGNORE_GNT_BT BIT(3) +#define BIT_NOMASK_TXBT_ENABLE BIT(3) #define REG_HRCV_MSG 0x1cf diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index 3ddd170f1651..9268ea8b6dda 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -192,6 +192,7 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) rtw_write32(rtwdev, REG_LTR_CTRL_BASIC + 4, WLAN_LTR_CTRL2); rtw_phy_init(rtwdev); + rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN); @@ -1498,6 +1499,34 @@ out: rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] finished\n"); } +static void rtw8723d_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 pd[CCK_PD_LV_MAX] = {3, 7, 13, 13, 13}; + u8 cck_n_rx; + + rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n", + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl); + + if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl) + return; + + cck_n_rx = (rtw_read8_mask(rtwdev, REG_CCK0_FAREPORT, BIT_CCK0_2RX) && + rtw_read8_mask(rtwdev, REG_CCK0_FAREPORT, BIT_CCK0_MRC)) ? 
2 : 1; + rtw_dbg(rtwdev, RTW_DBG_PHY, + "is_linked=%d, lv=%d, n_rx=%d, cs_ratio=0x%x, pd_th=0x%x, cck_fa_avg=%d\n", + rtw_is_assoc(rtwdev), new_lvl, cck_n_rx, + dm_info->cck_pd_default + new_lvl * 2, + pd[new_lvl], dm_info->cck_fa_avg); + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; + + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl; + rtw_write32_mask(rtwdev, REG_PWRTH, 0x3f0000, pd[new_lvl]); + rtw_write32_mask(rtwdev, REG_PWRTH2, 0x1f0000, + dm_info->cck_pd_default + new_lvl * 2); +} + /* for coex */ static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev) { @@ -1506,14 +1535,14 @@ static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev) /* BT report packet sample rate */ /* 0x790[5:0]=0x5 */ - rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); /* enable BT counter statistics */ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); /* enable PTA (3-wire function form BT side) */ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); - rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); /* enable PTA (tx/rx signal form WiFi side) */ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); @@ -1931,6 +1960,7 @@ static struct rtw_chip_ops rtw8723d_ops = { .efuse_grant = rtw8723d_efuse_grant, .false_alarm_statistics = rtw8723d_false_alarm_statistics, .phy_calibration = rtw8723d_phy_calibration, + .cck_pd_set = rtw8723d_phy_cck_pd_set, .pwr_track = rtw8723d_pwr_track, .config_bfee = NULL, .set_gid_table = NULL, @@ -1949,19 +1979,19 @@ static struct rtw_chip_ops rtw8723d_ops = { static const struct coex_table_para table_sant_8723d[] = { {0xffffffff, 0xffffffff}, /* case-0 */ {0x55555555, 0x55555555}, - {0x65555555, 0x65555555}, + {0x66555555, 0x66555555}, {0xaaaaaaaa, 0xaaaaaaaa}, {0x5a5a5a5a, 0x5a5a5a5a}, {0xfafafafa, 0xfafafafa}, /* case-5 */ - {0xa5555555, 0xaaaa5aaa}, - {0x6a5a5a5a, 0x5a5a5a5a}, + {0x6a5a5555, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, {0x6a5a5a5a, 0x6a5a5a5a}, {0x66555555, 0x5a5a5a5a}, - {0x65555555, 0x6a5a5a5a}, /* case-10 */ - {0x65555555, 0xfafafafa}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0x6a5a5aaa}, {0x66555555, 0x5a5a5aaa}, - {0x65555555, 0x5aaa5aaa}, - {0x65555555, 0xaaaa5aaa}, + {0x66555555, 0x6aaa5aaa}, + {0x66555555, 0xaaaa5aaa}, {0x66555555, 0xaaaaaaaa}, /* case-15 */ {0xffff55ff, 0xfafafafa}, {0xffff55ff, 0x6afa5afa}, @@ -1970,38 +2000,41 @@ static const struct coex_table_para table_sant_8723d[] = { {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ {0xaa5555aa, 0xaaaaaaaa}, {0xffffffff, 0x5a5a5a5a}, - {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, {0xffffffff, 0x55555555}, - {0xffffffff, 0x6a5a5aaa}, /* case-25 */ + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ {0x55555555, 0x5a5a5a5a}, {0x55555555, 0xaaaaaaaa}, - {0x55555555, 0x6a6a6a6a}, - {0x656a656a, 0x656a656a} + {0x55555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa}, }; /* Non-Shared-Antenna Coex Table */ static const struct coex_table_para table_nsant_8723d[] = { {0xffffffff, 0xffffffff}, /* case-100 */ {0x55555555, 0x55555555}, - {0x65555555, 0x65555555}, + {0x66555555, 0x66555555}, {0xaaaaaaaa, 0xaaaaaaaa}, {0x5a5a5a5a, 0x5a5a5a5a}, {0xfafafafa, 0xfafafafa}, /* case-105 */ {0x5afa5afa, 0x5afa5afa}, {0x55555555, 0xfafafafa}, - {0x65555555, 0xfafafafa}, - {0x65555555, 0x5a5a5a5a}, - {0x65555555, 0x6a5a5a5a}, /* case-110 */ - {0x65555555, 0xaaaaaaaa}, + 
{0x66555555, 0xfafafafa}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-110 */ + {0x66555555, 0xaaaaaaaa}, {0xffff55ff, 0xfafafafa}, {0xffff55ff, 0x5afa5afa}, {0xffff55ff, 0xaaaaaaaa}, - {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xffff55ff, 0xffff55ff}, /* case-115 */ {0xaaffffaa, 0x5afa5afa}, {0xaaffffaa, 0xaaaaaaaa}, {0xffffffff, 0xfafafafa}, {0xffffffff, 0x5afa5afa}, - {0xffffffff, 0xaaaaaaaa},/* case-120 */ + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ {0x55ff55ff, 0x5afa5afa}, {0x55ff55ff, 0xaaaaaaaa}, {0x55ff55ff, 0x55ff55ff} @@ -2009,31 +2042,31 @@ static const struct coex_table_para table_nsant_8723d[] = { /* Shared-Antenna TDMA */ static const struct coex_tdma_para tdma_sant_8723d[] = { - { {0x08, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ { {0x61, 0x3a, 0x03, 0x11, 0x11} }, - { {0x61, 0x20, 0x03, 0x11, 0x11} }, { {0x61, 0x30, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */ - { {0x61, 0x48, 0x03, 0x11, 0x10} }, + { {0x61, 0x45, 0x03, 0x11, 0x10} }, { {0x61, 0x3a, 0x03, 0x11, 0x10} }, { {0x61, 0x30, 0x03, 0x11, 0x10} }, { {0x61, 0x20, 0x03, 0x11, 0x10} }, { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ - { {0x61, 0x10, 0x03, 0x11, 0x14} }, + { {0x61, 0x08, 0x03, 0x11, 0x14} }, { {0x61, 0x08, 0x03, 0x10, 0x14} }, - { {0x51, 0x10, 0x03, 0x10, 0x54} }, - { {0x51, 0x10, 0x03, 0x10, 0x55} }, - { {0x51, 0x10, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ { {0x51, 0x45, 0x03, 0x10, 0x50} }, { {0x51, 0x3a, 0x03, 0x10, 0x50} }, { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, - { {0x51, 0x15, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ { {0x51, 0x4a, 0x03, 0x10, 0x50} }, { {0x51, 0x0c, 0x03, 0x10, 0x54} }, { {0x55, 0x08, 0x03, 0x10, 0x54} }, - { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ { {0x51, 0x08, 0x03, 0x10, 0x50} }, { {0x61, 0x08, 0x03, 0x11, 0x11} } @@ -2041,7 +2074,7 @@ static const struct coex_tdma_para tdma_sant_8723d[] = { /* Non-Shared-Antenna TDMA */ static const struct coex_tdma_para tdma_nsant_8723d[] = { - { {0x00, 0x00, 0x00, 0x40, 0x01} }, /* case-100 */ + { {0x00, 0x00, 0x00, 0x00, 0x01} }, /* case-100 */ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-101 */ { {0x61, 0x3a, 0x03, 0x11, 0x11} }, { {0x61, 0x30, 0x03, 0x11, 0x11} }, @@ -2062,7 +2095,7 @@ static const struct coex_tdma_para tdma_nsant_8723d[] = { { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-120 */ - { {0x51, 0x08, 0x03, 0x10, 0x50} }, + { {0x51, 0x08, 0x03, 0x10, 0x50} } }; /* rssi in percentage % (dbm = % - 100) */ @@ -2719,6 +2752,7 @@ struct rtw_chip_info rtw8723d_hw_spec = { .bt_desired_ver = 0x2f, .scbd_support = true, .new_scbd10_def = true, + .ble_hid_profile_support = false, .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, .bt_rssi_type = COEX_BTRSSI_RATIO, .ant_isolation = 15, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h index 7894d321cd7e..41d35174a542 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h @@ -163,6 +163,7 @@ static inline s32 iqk_mult(s32 x, s32 y, s32 *ext) #define REG_CCK0_SYS 
0x0a00 #define BIT_CCK_SIDE_BAND BIT(4) #define REG_CCK_ANT_SEL_11N 0x0a04 +#define REG_PWRTH 0x0a08 #define REG_CCK_FA_RST_11N 0x0a2c #define BIT_MASK_CCK_CNT_KEEP BIT(12) #define BIT_MASK_CCK_CNT_EN BIT(13) @@ -175,6 +176,8 @@ static inline s32 iqk_mult(s32 x, s32 y, s32 *ext) #define REG_CCK_CCA_CNT_11N 0x0a60 #define BIT_MASK_CCK_FA_MSB GENMASK(7, 0) #define BIT_MASK_CCK_FA_LSB GENMASK(15, 8) +#define REG_PWRTH2 0x0aa8 +#define REG_CSRATIO 0x0aaa #define REG_OFDM_FA_HOLDC_11N 0x0c00 #define BIT_MASK_OFDM_FA_KEEP BIT(31) #define REG_BB_RX_PATH_11N 0x0c04 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.c b/drivers/net/wireless/realtek/rtw88/rtw8723de.c index c81eb4c33642..2dd689441e8d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723de.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include "pci.h" #include "rtw8723de.h" static const struct pci_device_id rtw_8723de_id_table[] = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723de.h b/drivers/net/wireless/realtek/rtw88/rtw8723de.h index ba3842360c20..2b4894846a07 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723de.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8723de.h @@ -5,10 +5,6 @@ #ifndef __RTW_8723DE_H_ #define __RTW_8723DE_H_ -extern const struct dev_pm_ops rtw_pm_ops; extern struct rtw_chip_info rtw8723d_hw_spec; -int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -void rtw_pci_remove(struct pci_dev *pdev); -void rtw_pci_shutdown(struct pci_dev *pdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index da2e7415be8f..fbfd85439d1f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -656,8 +656,7 @@ static void rtw8821c_coex_cfg_init(struct rtw_dev *rtwdev) rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); /* BT report packet sample rate */ - rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, SAMPLE_RATE_MASK, - SAMPLE_RATE); + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); /* enable BT counter statistics */ rtw_write8(rtwdev, REG_BT_STAT_CTRL, BT_CNT_ENABLE); @@ -1021,6 +1020,7 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; u8 pd[CCK_PD_LV_MAX] = {3, 7, 13, 13, 13}; + u8 cck_n_rx; if (dm_info->min_rssi > 60) { new_lvl = 4; @@ -1028,9 +1028,20 @@ static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) goto set_cck_pd; } + rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n", + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl); + if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl) return; + cck_n_rx = (rtw_read8_mask(rtwdev, REG_CCK0_FAREPORT, BIT_CCK0_2RX) && + rtw_read8_mask(rtwdev, REG_CCK0_FAREPORT, BIT_CCK0_MRC)) ? 
2 : 1; + rtw_dbg(rtwdev, RTW_DBG_PHY, + "is_linked=%d, lv=%d, n_rx=%d, cs_ratio=0x%x, pd_th=0x%x, cck_fa_avg=%d\n", + rtw_is_assoc(rtwdev), new_lvl, cck_n_rx, + dm_info->cck_pd_default + new_lvl * 2, + pd[new_lvl], dm_info->cck_fa_avg); + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; set_cck_pd: @@ -1819,6 +1830,7 @@ struct rtw_chip_info rtw8821c_hw_spec = { .bt_desired_ver = 0x46, .scbd_support = true, .new_scbd10_def = false, + .ble_hid_profile_support = false, .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, .bt_rssi_type = COEX_BTRSSI_RATIO, .ant_isolation = 15, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index bd01e82b6bcd..e11e3fc41c95 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -231,8 +231,6 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_IQKFAILMSK 0x1bf0 #define BIT_MASK_R_RFE_SEL_15 GENMASK(31, 28) #define BIT_SDIO_INT BIT(18) -#define SAMPLE_RATE_MASK GENMASK(5, 0) -#define SAMPLE_RATE 0x5 #define BT_CNT_ENABLE 0x1 #define BIT_BCN_QUEUE BIT(3) #define BCN_PRI_EN 0x1 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c index 616fdcfd62c9..f34de115e4bc 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include "pci.h" #include "rtw8821ce.h" static const struct pci_device_id rtw_8821ce_id_table[] = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h index 8d3eb77a876b..54142acca534 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h @@ -5,10 +5,6 @@ #ifndef __RTW_8821CE_H_ #define __RTW_8821CE_H_ -extern const struct dev_pm_ops rtw_pm_ops; extern struct rtw_chip_info rtw8821c_hw_spec; -int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -void rtw_pci_remove(struct pci_dev *pdev); -void rtw_pci_shutdown(struct pci_dev *pdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 22d0dd640ac9..f1789155e901 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -87,7 +87,7 @@ static const u32 rtw8822b_txscale_tbl[RTW_TXSCALE_SIZE] = { 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe }; -static const u8 rtw8822b_get_swing_index(struct rtw_dev *rtwdev) +static u8 rtw8822b_get_swing_index(struct rtw_dev *rtwdev) { u8 i = 0; u32 swing, table_value; @@ -1120,21 +1120,21 @@ static void rtw8822b_coex_cfg_init(struct rtw_dev *rtwdev) /* BT report packet sample rate */ /* 0x790[5:0]=0x5 */ - rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); /* enable BT counter statistics */ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); /* enable PTA (3-wire function form BT side) */ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); - rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); /* enable PTA (tx/rx signal form WiFi side) */ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); /* wl tx signal to PTA not case EDCCA */ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); /* GNT_BT=1 while select both */ - rtw_write8_set(rtwdev, REG_BT_COEX_V2, 
BIT_GNT_BT_POLARITY); + rtw_write16_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); } static void rtw8822b_coex_cfg_ant_switch(struct rtw_dev *rtwdev, @@ -1341,6 +1341,7 @@ static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) coex_dm->cur_wl_rx_low_gain_en = low_gain; if (coex_dm->cur_wl_rx_low_gain_en) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Hi-Li Table On!\n"); for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++) rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_on[i]); @@ -1350,6 +1351,7 @@ static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x1); rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x3f); } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Hi-Li Table Off!\n"); for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++) rtw_write32(rtwdev, 0x81c, wl_rx_low_gain_off[i]); @@ -2141,14 +2143,14 @@ static const struct coex_table_para table_sant_8822b[] = { {0xaaaaaaaa, 0xaaaaaaaa}, {0x5a5a5a5a, 0x5a5a5a5a}, {0xfafafafa, 0xfafafafa}, /* case-5 */ - {0x6a5a6a5a, 0xaaaaaaaa}, + {0x6a5a5555, 0xaaaaaaaa}, {0x6a5a56aa, 0x6a5a56aa}, {0x6a5a5a5a, 0x6a5a5a5a}, {0x66555555, 0x5a5a5a5a}, {0x66555555, 0x6a5a5a5a}, /* case-10 */ {0x66555555, 0xfafafafa}, {0x66555555, 0x5a5a5aaa}, - {0x66555555, 0x5aaa5aaa}, + {0x66555555, 0x6aaa5aaa}, {0x66555555, 0xaaaa5aaa}, {0x66555555, 0xaaaaaaaa}, /* case-15 */ {0xffff55ff, 0xfafafafa}, @@ -2158,13 +2160,16 @@ static const struct coex_table_para table_sant_8822b[] = { {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ {0xaa5555aa, 0xaaaaaaaa}, {0xffffffff, 0x5a5a5a5a}, - {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, {0xffffffff, 0x55555555}, {0xffffffff, 0x6a5a5aaa}, /* case-25 */ {0x55555555, 0x5a5a5a5a}, {0x55555555, 0xaaaaaaaa}, {0x55555555, 0x6a5a6a5a}, - {0x66556655, 0x66556655} + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa}, }; /* Non-Shared-Antenna Coex Table */ @@ -2184,7 +2189,7 @@ static const struct coex_table_para table_nsant_8822b[] = { {0xffff55ff, 0xfafafafa}, {0xffff55ff, 0x5afa5afa}, {0xffff55ff, 0xaaaaaaaa}, - {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xffff55ff, 0xffff55ff}, /* case-115 */ {0xaaffffaa, 0x5afa5afa}, {0xaaffffaa, 0xaaaaaaaa}, {0xffffffff, 0xfafafafa}, @@ -2213,7 +2218,7 @@ static const struct coex_tdma_para tdma_sant_8822b[] = { { {0x51, 0x08, 0x03, 0x10, 0x54} }, { {0x51, 0x08, 0x03, 0x10, 0x55} }, { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ - { {0x51, 0x45, 0x03, 0x10, 0x10} }, + { {0x51, 0x45, 0x03, 0x10, 0x50} }, { {0x51, 0x3a, 0x03, 0x10, 0x50} }, { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, @@ -2221,7 +2226,7 @@ static const struct coex_tdma_para tdma_sant_8822b[] = { { {0x51, 0x4a, 0x03, 0x10, 0x50} }, { {0x51, 0x0c, 0x03, 0x10, 0x54} }, { {0x55, 0x08, 0x03, 0x10, 0x54} }, - { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ { {0x51, 0x08, 0x03, 0x10, 0x50} }, { {0x61, 0x08, 0x03, 0x11, 0x11} } @@ -2230,7 +2235,7 @@ static const struct coex_tdma_para tdma_sant_8822b[] = { /* Non-Shared-Antenna TDMA */ static const struct coex_tdma_para tdma_nsant_8822b[] = { { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */ - { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-101 */ { {0x61, 0x3a, 0x03, 0x11, 0x11} }, { {0x61, 0x30, 0x03, 0x11, 0x11} }, { {0x61, 0x20, 0x03, 0x11, 0x11} }, @@ -2249,13 +2254,13 @@ static 
const struct coex_tdma_para tdma_nsant_8822b[] = { { {0x51, 0x3a, 0x03, 0x10, 0x50} }, { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, - { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */ + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-120 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} } }; /* rssi in percentage % (dbm = % - 100) */ static const u8 wl_rssi_step_8822b[] = {60, 50, 44, 30}; static const u8 bt_rssi_step_8822b[] = {30, 30, 30, 30}; -static const struct coex_5g_afh_map afh_5g_8822b[] = { {0, 0, 0} }; /* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ static const struct coex_rf_para rf_para_tx_8822b[] = { @@ -2276,6 +2281,27 @@ static const struct coex_rf_para rf_para_rx_8822b[] = { {1, 13, true, 1} }; +static const struct coex_5g_afh_map afh_5g_8822b[] = { + {120, 2, 4}, + {124, 8, 8}, + {128, 17, 8}, + {132, 26, 10}, + {136, 34, 8}, + {140, 42, 10}, + {144, 51, 8}, + {149, 62, 8}, + {153, 71, 10}, + {157, 77, 4}, + {118, 2, 4}, + {126, 12, 16}, + {134, 29, 16}, + {142, 46, 16}, + {151, 66, 16}, + {159, 76, 4}, + {122, 10, 20}, + {138, 37, 34}, + {155, 68, 20} +}; static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b)); static const u8 @@ -2481,6 +2507,7 @@ struct rtw_chip_info rtw8822b_hw_spec = { .bt_desired_ver = 0x6, .scbd_support = true, .new_scbd10_def = false, + .ble_hid_profile_support = false, .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, .bt_rssi_type = COEX_BTRSSI_RATIO, .ant_isolation = 15, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.c b/drivers/net/wireless/realtek/rtw88/rtw8822be.c index 921916ae15ca..62ee7e62cac0 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822be.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include "pci.h" #include "rtw8822be.h" static const struct pci_device_id rtw_8822be_id_table[] = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822be.h b/drivers/net/wireless/realtek/rtw88/rtw8822be.h index d823ca059f5c..6668460d664d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822be.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822be.h @@ -5,10 +5,6 @@ #ifndef __RTW_8822BE_H_ #define __RTW_8822BE_H_ -extern const struct dev_pm_ops rtw_pm_ops; extern struct rtw_chip_info rtw8822b_hw_spec; -int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -void rtw_pci_remove(struct pci_dev *pdev); -void rtw_pci_shutdown(struct pci_dev *pdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index e37300e98517..dd560c28abb2 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -18,6 +18,8 @@ #include "bf.h" #include "efuse.h" +#define IQK_DONE_8822C 0xaa + static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path, u8 rx_path, bool is_tx2_path); @@ -2110,20 +2112,17 @@ static void rtw8822c_do_iqk(struct rtw_dev *rtwdev) { struct rtw_iqk_para para = {0}; u8 iqk_chk; - int counter; + int ret; para.clear = 1; rtw_fw_do_iqk(rtwdev, ¶); - for (counter = 0; counter < 300; counter++) { - iqk_chk = rtw_read8(rtwdev, REG_RPT_CIP); - if (iqk_chk == 0xaa) - break; - msleep(20); - } - rtw_write8(rtwdev, REG_IQKSTAT, 0x0); + ret = read_poll_timeout(rtw_read8, iqk_chk, iqk_chk == IQK_DONE_8822C, + 20000, 300000, false, rtwdev, REG_RPT_CIP); + if (ret) + rtw_warn(rtwdev, "failed to poll iqk status bit\n"); - rtw_dbg(rtwdev, RTW_DBG_RFK, "iqk counter=%d\n", counter); + 
rtw_write8(rtwdev, REG_IQKSTAT, 0x0); } /* for coex */ @@ -2132,28 +2131,28 @@ static void rtw8822c_coex_cfg_init(struct rtw_dev *rtwdev) /* enable TBTT nterrupt */ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); - /* BT report packet sample rate */ + /* BT report packet sample rate */ /* 0x790[5:0]=0x5 */ - rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05); + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); /* enable BT counter statistics */ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1); /* enable PTA (3-wire function form BT side) */ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); - rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); /* enable PTA (tx/rx signal form WiFi side) */ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); /* wl tx signal to PTA not case EDCCA */ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); /* GNT_BT=1 while select both */ - rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); - /* BT_CCA = ~GNT_WL_BB, (not or GNT_BT_BB, LTE_Rx */ + rtw_write16_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); + /* BT_CCA = ~GNT_WL_BB, not or GNT_BT_BB, LTE_Rx */ rtw_write8_clr(rtwdev, REG_DUMMY_PAGE4_V1, BIT_BTCCA_CTRL); /* to avoid RF parameter error */ - rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, 0x40000); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODOPT, 0xfffff, 0x40000); } static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) @@ -2190,10 +2189,10 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) * disable WL-S1 BB chage RF mode if GNT_BT * since RF TRx mask can do it */ - rtw_write8_mask(rtwdev, 0x1c32, BIT(6), 1); - rtw_write8_mask(rtwdev, 0x1c39, BIT(4), 0); - rtw_write8_mask(rtwdev, 0x1c3b, BIT(4), 1); - rtw_write8_mask(rtwdev, 0x4160, BIT(3), 1); + rtw_write8_mask(rtwdev, REG_ANAPAR + 2, BIT_ANAPAR_BTPS >> 16, 1); + rtw_write8_mask(rtwdev, REG_RSTB_SEL + 1, BIT_DAC_OFF_ENABLE, 0); + rtw_write8_mask(rtwdev, REG_RSTB_SEL + 3, BIT_DAC_OFF_ENABLE, 1); + rtw_write8_mask(rtwdev, REG_IGN_GNTBT4, BIT_PI_IGNORE_GNT_BT, 1); /* disable WL-S0 BB chage RF mode if wifi is at 5G, * or antenna path is separated @@ -2201,26 +2200,32 @@ static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) if (coex_stat->wl_coex_mode == COEX_WLINK_5G || coex->under_5g || !efuse->share_ant) { if (coex_stat->kt_ver >= 3) { - rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0); - rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 1); + rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1, + BIT_PI_IGNORE_GNT_BT, 0); + rtw_write8_mask(rtwdev, REG_NOMASK_TXBT, + BIT_NOMASK_TXBT_ENABLE, 1); } else { - rtw_write8_mask(rtwdev, 0x1860, BIT(3), 1); + rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1, + BIT_PI_IGNORE_GNT_BT, 1); } } else { /* shared-antenna */ - rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0); - if (coex_stat->kt_ver >= 3) - rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 0); + rtw_write8_mask(rtwdev, REG_IGN_GNT_BT1, + BIT_PI_IGNORE_GNT_BT, 0); + if (coex_stat->kt_ver >= 3) { + rtw_write8_mask(rtwdev, REG_NOMASK_TXBT, + BIT_NOMASK_TXBT_ENABLE, 0); + } } } static void rtw8822c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) { - rtw_write8_mask(rtwdev, 0x66, BIT(4), 0); - rtw_write8_mask(rtwdev, 0x67, BIT(0), 0); - rtw_write8_mask(rtwdev, 0x42, BIT(3), 0); - rtw_write8_mask(rtwdev, 0x65, BIT(7), 0); - rtw_write8_mask(rtwdev, 0x73, BIT(3), 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT_BTGP_SPI_EN >> 16, 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT_BTGP_JTAG_EN >> 24, 0); + 
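The rtw8822c_do_iqk() hunk above swaps a hand-rolled 300-iteration/20 ms polling loop for read_poll_timeout(), and ps.c makes the matching conversion to read_poll_timeout_atomic(). A minimal sketch of the helper's calling convention, under stated assumptions — my_dev, my_read8() and MY_REG_STATUS are placeholders, not driver symbols:

#include <linux/iopoll.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_REG_STATUS 0x10	/* hypothetical status register */
#define MY_DONE_BIT BIT(0)

struct my_dev {
	u8 regs[256];		/* stand-in for real register access */
};

static u8 my_read8(struct my_dev *dev, u32 reg)
{
	return dev->regs[reg];
}

static int wait_done(struct my_dev *dev)
{
	u8 val;

	/* sleeping variant: re-read every 20 ms, give up after 300 ms;
	 * returns 0 on success, -ETIMEDOUT if the condition never held */
	return read_poll_timeout(my_read8, val, val & MY_DONE_BIT,
				 20000, 300000, false, dev, MY_REG_STATUS);
}

read_poll_timeout_atomic() takes the same arguments but delays with udelay() instead of sleeping, which is why the power-mode path, which may run in atomic context, uses it with microsecond-scale parameters (100 us poll interval, 15 ms timeout).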
rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT_FSPI_EN >> 16, 0); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 1, BIT_LED1DIS >> 8, 0); + rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT_DBG_GNT_WL_BT >> 24, 0); } static void rtw8822c_coex_cfg_rfe_type(struct rtw_dev *rtwdev) @@ -2241,9 +2246,9 @@ static void rtw8822c_coex_cfg_rfe_type(struct rtw_dev *rtwdev) coex_rfe->wlg_at_btg = false; /* disable LTE coex in wifi side */ - rtw_coex_write_indirect_reg(rtwdev, 0x38, BIT_LTE_COEX_EN, 0x0); - rtw_coex_write_indirect_reg(rtwdev, 0xa0, MASKLWORD, 0xffff); - rtw_coex_write_indirect_reg(rtwdev, 0xa4, MASKLWORD, 0xffff); + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0); + rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff); + rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff); } static void rtw8822c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) @@ -2268,16 +2273,22 @@ static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) coex_dm->cur_wl_rx_low_gain_en = low_gain; if (coex_dm->cur_wl_rx_low_gain_en) { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Hi-Li Table On!\n"); + /* set Rx filter corner RCK offset */ - rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x22); - rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x36); - rtw_write_rf(rtwdev, RF_PATH_B, 0xde, 0xfffff, 0x22); - rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x36); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, RFREG_MASK, 0x22); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, RFREG_MASK, 0x36); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, RFREG_MASK, 0x22); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, RFREG_MASK, 0x36); + } else { + rtw_dbg(rtwdev, RTW_DBG_COEX, "[BTCoex], Hi-Li Table Off!\n"); + /* set Rx filter corner RCK offset */ - rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x20); - rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x0); - rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, RFREG_MASK, 0x20); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, RFREG_MASK, 0x0); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, RFREG_MASK, 0x20); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, RFREG_MASK, 0x0); } } @@ -2442,7 +2453,7 @@ static void rtw8822c_dpk_rxbb_dc_cal(struct rtw_dev *rtwdev, u8 path) static u8 rtw8822c_dpk_dc_corr_check(struct rtw_dev *rtwdev, u8 path) { u16 dc_i, dc_q; - u8 corr_val, corr_idx; + u8 corr_idx; rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000900f0); dc_i = (u16)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(27, 16)); @@ -2455,7 +2466,7 @@ static u8 rtw8822c_dpk_dc_corr_check(struct rtw_dev *rtwdev, u8 path) rtw_write32(rtwdev, REG_RXSRAM_CTL, 0x000000f0); corr_idx = (u8)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(7, 0)); - corr_val = (u8)rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(15, 8)); + rtw_read32_mask(rtwdev, REG_STAT_RPT, GENMASK(15, 8)); if (dc_i > 200 || dc_q > 200 || corr_idx < 40 || corr_idx > 65) return 1; @@ -3442,6 +3453,10 @@ rtw8822c_phy_cck_pd_set_reg(struct rtw_dev *rtwdev, rtw8822c_cck_pd_reg[bw][nrx].reg_cs, rtw8822c_cck_pd_reg[bw][nrx].mask_cs, cs); + + rtw_dbg(rtwdev, RTW_DBG_PHY, + "is_linked=%d, bw=%d, nrx=%d, cs_ratio=0x%x, pd_th=0x%x\n", + rtw_is_assoc(rtwdev), bw, nrx, cs, pd); } static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) @@ -3455,6 +3470,10 @@ static void rtw8822c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) nrx = (u8)rtw_read32_mask(rtwdev, 0x1a2c, 0x60000); bw = (u8)rtw_read32_mask(rtwdev, 0x9b0, 0xc); + rtw_dbg(rtwdev, 
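Several hunks above (rtw8822c_coex_cfg_gnt_fix/_debug) replace bare offsets such as 0x1c32 with a named register plus a byte offset, shifting the 32-bit field mask to match. A short sketch of why the shift pairs with the offset; the register and bit names are hypothetical:

#include <linux/bits.h>
#include <linux/types.h>

#define REG_PAD_CTRL	0x0064		/* hypothetical 32-bit register */
#define BIT_SPI_EN	BIT(20)		/* field living in byte 2 */

/*
 * An 8-bit access at REG_PAD_CTRL + 2 sees bits [23:16] of the 32-bit
 * layout, so a mask defined against the full register must be shifted
 * right by 8 * byte_offset first: BIT(20) >> 16 == BIT(4) within the
 * addressed byte.
 */
static u8 mask_for_byte(u32 reg_mask, unsigned int byte_offset)
{
	return (u8)(reg_mask >> (8 * byte_offset));
}

/* usage: write8_mask(dev, REG_PAD_CTRL + 2, mask_for_byte(BIT_SPI_EN, 2), 0); */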
RTW_DBG_PHY, "lv: (%d) -> (%d) bw=%d nr=%d cck_fa_avg=%d\n", + dm_info->cck_pd_lv[bw][nrx], new_lvl, bw, nrx, + dm_info->cck_fa_avg); + if (dm_info->cck_pd_lv[bw][nrx] == new_lvl) return; @@ -3983,14 +4002,14 @@ static const struct coex_table_para table_sant_8822c[] = { {0xaaaaaaaa, 0xaaaaaaaa}, {0x5a5a5a5a, 0x5a5a5a5a}, {0xfafafafa, 0xfafafafa}, /* case-5 */ - {0x6a5a6a5a, 0xaaaaaaaa}, + {0x6a5a5555, 0xaaaaaaaa}, {0x6a5a56aa, 0x6a5a56aa}, {0x6a5a5a5a, 0x6a5a5a5a}, {0x66555555, 0x5a5a5a5a}, {0x66555555, 0x6a5a5a5a}, /* case-10 */ - {0x66555555, 0xfafafafa}, + {0x66555555, 0x6a5a5aaa}, {0x66555555, 0x5a5a5aaa}, - {0x66555555, 0x5aaa5aaa}, + {0x66555555, 0x6aaa5aaa}, {0x66555555, 0xaaaa5aaa}, {0x66555555, 0xaaaaaaaa}, /* case-15 */ {0xffff55ff, 0xfafafafa}, @@ -4000,13 +4019,16 @@ static const struct coex_table_para table_sant_8822c[] = { {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ {0xaa5555aa, 0xaaaaaaaa}, {0xffffffff, 0x5a5a5a5a}, - {0xffffffff, 0x6a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, {0xffffffff, 0x55555555}, - {0xffffffff, 0x6a5a5aaa}, /* case-25 */ + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ {0x55555555, 0x5a5a5a5a}, {0x55555555, 0xaaaaaaaa}, {0x55555555, 0x6a5a6a5a}, - {0x66556655, 0x66556655} + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /*case-30*/ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa}, }; /* Non-Shared-Antenna Coex Table */ @@ -4026,12 +4048,12 @@ static const struct coex_table_para table_nsant_8822c[] = { {0xffff55ff, 0xfafafafa}, {0xffff55ff, 0x5afa5afa}, {0xffff55ff, 0xaaaaaaaa}, - {0xaaffffaa, 0xfafafafa}, /* case-115 */ + {0xffff55ff, 0xffff55ff}, /* case-115 */ {0xaaffffaa, 0x5afa5afa}, {0xaaffffaa, 0xaaaaaaaa}, {0xffffffff, 0xfafafafa}, {0xffffffff, 0x5afa5afa}, - {0xffffffff, 0xaaaaaaaa},/* case-120 */ + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ {0x55ff55ff, 0x5afa5afa}, {0x55ff55ff, 0xaaaaaaaa}, {0x55ff55ff, 0x55ff55ff} @@ -4040,7 +4062,7 @@ static const struct coex_table_para table_nsant_8822c[] = { /* Shared-Antenna TDMA */ static const struct coex_tdma_para tdma_sant_8822c[] = { { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ - { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ { {0x61, 0x3a, 0x03, 0x11, 0x11} }, { {0x61, 0x30, 0x03, 0x11, 0x11} }, { {0x61, 0x20, 0x03, 0x11, 0x11} }, @@ -4055,7 +4077,7 @@ static const struct coex_tdma_para tdma_sant_8822c[] = { { {0x51, 0x08, 0x03, 0x10, 0x54} }, { {0x51, 0x08, 0x03, 0x10, 0x55} }, { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ - { {0x51, 0x45, 0x03, 0x10, 0x10} }, + { {0x51, 0x45, 0x03, 0x10, 0x50} }, { {0x51, 0x3a, 0x03, 0x10, 0x50} }, { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, @@ -4063,7 +4085,7 @@ static const struct coex_tdma_para tdma_sant_8822c[] = { { {0x51, 0x4a, 0x03, 0x10, 0x50} }, { {0x51, 0x0c, 0x03, 0x10, 0x54} }, { {0x55, 0x08, 0x03, 0x10, 0x54} }, - { {0x65, 0x10, 0x03, 0x11, 0x11} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ { {0x51, 0x08, 0x03, 0x10, 0x50} }, { {0x61, 0x08, 0x03, 0x11, 0x11} } @@ -4091,7 +4113,8 @@ static const struct coex_tdma_para tdma_nsant_8822c[] = { { {0x51, 0x3a, 0x03, 0x10, 0x50} }, { {0x51, 0x30, 0x03, 0x10, 0x50} }, { {0x51, 0x20, 0x03, 0x10, 0x50} }, - { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */ + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-120 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} } }; /* rssi in percentage % (dbm = % - 100) */ @@ -4337,10 +4360,11 @@ struct rtw_chip_info rtw8822c_hw_spec = { .wowlan_stub = &rtw_wowlan_stub_8822c, 
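The coex tables above depend on /* case-N */ comments to keep indices readable as entries are added. Designated array initializers are one way to make the index explicit in C; a sketch with hypothetical names and only a few entries:

#include <linux/types.h>

struct coex_table_para {
	u32 bt;
	u32 wl;
};

enum { COEX_CASE_0 = 0, COEX_CASE_5 = 5, COEX_CASE_10 = 10 };

/* Each entry lands at its named index regardless of textual order,
 * so inserting a case cannot silently shift the ones after it.
 */
static const struct coex_table_para table_sant[] = {
	[COEX_CASE_0]  = {0x55555555, 0x5a5a5a5a},
	[COEX_CASE_5]  = {0xfafafafa, 0xfafafafa},
	[COEX_CASE_10] = {0x66555555, 0x6a5a5a5a},
};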
.max_sched_scan_ssids = 4, #endif - .coex_para_ver = 0x20070217, - .bt_desired_ver = 0x17, + .coex_para_ver = 0x201029, + .bt_desired_ver = 0x1c, .scbd_support = true, .new_scbd10_def = true, + .ble_hid_profile_support = true, .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, .bt_rssi_type = COEX_BTRSSI_DBM, .ant_isolation = 15, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c index 026ac49ce6e3..3845b1333dc3 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c @@ -4,6 +4,7 @@ #include <linux/module.h> #include <linux/pci.h> +#include "pci.h" #include "rtw8822ce.h" static const struct pci_device_id rtw_8822ce_id_table[] = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h index c2c0e8675d74..fee32d7a4504 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.h @@ -5,10 +5,6 @@ #ifndef __RTW_8822CE_H_ #define __RTW_8822CE_H_ -extern const struct dev_pm_ops rtw_pm_ops; extern struct rtw_chip_info rtw8822c_hw_spec; -int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); -void rtw_pci_remove(struct pci_dev *pdev); -void rtw_pci_shutdown(struct pci_dev *pdev); #endif diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c index 2fcdf70a3a77..fc9544f4e5e4 100644 --- a/drivers/net/wireless/realtek/rtw88/wow.c +++ b/drivers/net/wireless/realtek/rtw88/wow.c @@ -332,7 +332,8 @@ static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw, key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX; break; default: - rtw_err(rtwdev, "Unsupported key type for wowlan mode\n"); + rtw_err(rtwdev, "Unsupported key type for wowlan mode: %#x\n", + key->cipher); hw_key_type = 0; break; } @@ -555,7 +556,7 @@ static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev) int ret = 0; if (test_bit(RTW_FLAG_WOWLAN, rtwdev->flags)) { - if (rtw_fw_lps_deep_mode) + if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE) rtw_leave_lps_deep(rtwdev); } else { if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) { @@ -616,7 +617,8 @@ static int rtw_wow_enter_ps(struct rtw_dev *rtwdev) if (rtw_wow_mgd_linked(rtwdev)) ret = rtw_wow_enter_linked_ps(rtwdev); - else if (rtw_wow_no_link(rtwdev) && rtw_fw_lps_deep_mode) + else if (rtw_wow_no_link(rtwdev) && + rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE) ret = rtw_wow_enter_no_link_ps(rtwdev); return ret; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 75b5d545b49e..9fe77556858e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -3379,7 +3379,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, - .ndo_get_stats64 = usbnet_get_stats64, + .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = rndis_wlan_set_multicast_list, diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 3f7e3cfb6f00..ce9892152f4d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -248,7 +248,8 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - if ((skb->len - 
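The rndis_wlan hunk above swaps a driver-specific stats callback for the generic dev_get_tstats64() helper, which works for any netdev that keeps per-cpu pcpu_sw_netstats in dev->tstats. A sketch of the wiring; the my_* names are hypothetical:

#include <linux/netdevice.h>

static int my_dev_init(struct net_device *dev)
{
	/* dev_get_tstats64() sums these per-cpu counters on demand */
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;
	return 0;
}

static void my_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_init	 = my_dev_init,
	.ndo_uninit	 = my_dev_uninit,
	.ndo_get_stats64 = dev_get_tstats64,
};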
header_size) == EAPOL4_PACKET_LEN) { + if (((skb->len - header_size) == EAPOL4_PACKET_LEN) || + ((skb->len - header_size) == EAPOL4_PACKET_LEN - 2)) { data_desc->misc_flags |= RSI_DESC_REQUIRE_CFM_TO_HOST; xtend_desc->confirm_frame_type = EAPOL4_CONFIRM; diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index a7b8684143f4..592e9dadcb55 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -153,9 +153,7 @@ static void rsi_handle_interrupt(struct sdio_func *function) if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; - dev->sdio_irq_task = current; - rsi_interrupt_handler(adapter); - dev->sdio_irq_task = NULL; + rsi_set_event(&dev->rx_thread.event); } /** @@ -1058,8 +1056,6 @@ static int rsi_probe(struct sdio_func *pfunction, rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__); goto fail_kill_thread; } - skb_queue_head_init(&sdev->rx_q.head); - sdev->rx_q.num_rx_pkts = 0; sdio_claim_host(pfunction); if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) { diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 7825c9a889d3..8ace1874e5cb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -60,39 +60,20 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) return status; } +static void rsi_rx_handler(struct rsi_hw *adapter); + void rsi_sdio_rx_thread(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; struct rsi_91x_sdiodev *sdev = adapter->rsi_dev; - struct sk_buff *skb; - int status; do { rsi_wait_event(&sdev->rx_thread.event, EVENT_WAIT_FOREVER); rsi_reset_event(&sdev->rx_thread.event); + rsi_rx_handler(adapter); + } while (!atomic_read(&sdev->rx_thread.thread_done)); - while (true) { - if (atomic_read(&sdev->rx_thread.thread_done)) - goto out; - - skb = skb_dequeue(&sdev->rx_q.head); - if (!skb) - break; - if (sdev->rx_q.num_rx_pkts > 0) - sdev->rx_q.num_rx_pkts--; - status = rsi_read_pkt(common, skb->data, skb->len); - if (status) { - rsi_dbg(ERR_ZONE, "Failed to read the packet\n"); - dev_kfree_skb(skb); - break; - } - dev_kfree_skb(skb); - } - } while (1); - -out: rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__); - skb_queue_purge(&sdev->rx_q.head); atomic_inc(&sdev->rx_thread.thread_done); complete_and_exit(&sdev->rx_thread.completion, 0); } @@ -113,10 +94,6 @@ static int rsi_process_pkt(struct rsi_common *common) u32 rcv_pkt_len = 0; int status = 0; u8 value = 0; - struct sk_buff *skb; - - if (dev->rx_q.num_rx_pkts >= RSI_MAX_RX_PKTS) - return 0; num_blks = ((adapter->interrupt_status & 1) | ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); @@ -144,22 +121,19 @@ static int rsi_process_pkt(struct rsi_common *common) rcv_pkt_len = (num_blks * 256); - skb = dev_alloc_skb(rcv_pkt_len); - if (!skb) - return -ENOMEM; - - status = rsi_sdio_host_intf_read_pkt(adapter, skb->data, rcv_pkt_len); + status = rsi_sdio_host_intf_read_pkt(adapter, dev->pktbuffer, + rcv_pkt_len); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n", __func__); - dev_kfree_skb(skb); return status; } - skb_put(skb, rcv_pkt_len); - skb_queue_tail(&dev->rx_q.head, skb); - dev->rx_q.num_rx_pkts++; - rsi_set_event(&dev->rx_thread.event); + status = rsi_read_pkt(common, dev->pktbuffer, rcv_pkt_len); + if (status) { + rsi_dbg(ERR_ZONE, "Failed to read the packet\n"); + return status; + } return 0; } @@ -251,18 +225,17 @@ int 
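The rsi_handle_interrupt() change above reduces the SDIO IRQ callback to a single event signal and moves all bus I/O into the RX thread. The general shape of that split, sketched with generic primitives rather than the driver's rsi_thread helpers:

#include <linux/atomic.h>
#include <linux/wait.h>

struct rx_ctx {
	wait_queue_head_t wq;
	atomic_t pending;
	atomic_t stop;
};

/* IRQ context: no SDIO transfers here, just flag the worker. */
static void my_irq(struct rx_ctx *ctx)
{
	atomic_set(&ctx->pending, 1);
	wake_up(&ctx->wq);
}

/* Runs as a kthread; all packet reads happen in task context. */
static int my_rx_thread(void *arg)
{
	struct rx_ctx *ctx = arg;

	while (!atomic_read(&ctx->stop)) {
		wait_event(ctx->wq, atomic_read(&ctx->pending) ||
				    atomic_read(&ctx->stop));
		atomic_set(&ctx->pending, 0);
		/* ... read and process frames from the card ... */
	}
	return 0;
}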
rsi_init_sdio_slave_regs(struct rsi_hw *adapter) } /** - * rsi_interrupt_handler() - This function read and process SDIO interrupts. + * rsi_rx_handler() - Read and process SDIO interrupts. * @adapter: Pointer to the adapter structure. * * Return: None. */ -void rsi_interrupt_handler(struct rsi_hw *adapter) +static void rsi_rx_handler(struct rsi_hw *adapter) { struct rsi_common *common = adapter->priv; struct rsi_91x_sdiodev *dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; int status; - enum sdio_interrupt_type isr_type; u8 isr_status = 0; u8 fw_status = 0; @@ -293,73 +266,69 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) __func__, isr_status, (1 << MSDU_PKT_PENDING), (1 << FW_ASSERT_IND)); - do { - RSI_GET_SDIO_INTERRUPT_TYPE(isr_status, isr_type); - - switch (isr_type) { - case BUFFER_AVAILABLE: - status = rsi_sdio_check_buffer_status(adapter, - 0); - if (status < 0) - rsi_dbg(ERR_ZONE, - "%s: Failed to check buffer status\n", - __func__); - rsi_sdio_ack_intr(common->priv, - (1 << PKT_BUFF_AVAILABLE)); - rsi_set_event(&common->tx_thread.event); - - rsi_dbg(ISR_ZONE, - "%s: ==> BUFFER_AVAILABLE <==\n", - __func__); - dev->buff_status_updated = true; - break; - - case FIRMWARE_ASSERT_IND: + if (isr_status & BIT(PKT_BUFF_AVAILABLE)) { + status = rsi_sdio_check_buffer_status(adapter, 0); + if (status < 0) rsi_dbg(ERR_ZONE, - "%s: ==> FIRMWARE Assert <==\n", + "%s: Failed to check buffer status\n", __func__); - status = rsi_sdio_read_register(common->priv, + rsi_sdio_ack_intr(common->priv, + BIT(PKT_BUFF_AVAILABLE)); + rsi_set_event(&common->tx_thread.event); + + rsi_dbg(ISR_ZONE, "%s: ==> BUFFER_AVAILABLE <==\n", + __func__); + dev->buff_status_updated = true; + + isr_status &= ~BIT(PKT_BUFF_AVAILABLE); + } + + if (isr_status & BIT(FW_ASSERT_IND)) { + rsi_dbg(ERR_ZONE, "%s: ==> FIRMWARE Assert <==\n", + __func__); + status = rsi_sdio_read_register(common->priv, SDIO_FW_STATUS_REG, &fw_status); - if (status) { - rsi_dbg(ERR_ZONE, - "%s: Failed to read f/w reg\n", - __func__); - } else { - rsi_dbg(ERR_ZONE, - "%s: Firmware Status is 0x%x\n", - __func__ , fw_status); - rsi_sdio_ack_intr(common->priv, - (1 << FW_ASSERT_IND)); - } - - common->fsm_state = FSM_CARD_NOT_READY; - break; - - case MSDU_PACKET_PENDING: - rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n"); - dev->rx_info.total_sdio_msdu_pending_intr++; - - status = rsi_process_pkt(common); - if (status) { - rsi_dbg(ERR_ZONE, - "%s: Failed to read pkt\n", - __func__); - mutex_unlock(&common->rx_lock); - return; - } - break; - default: - rsi_sdio_ack_intr(common->priv, isr_status); - dev->rx_info.total_sdio_unknown_intr++; - isr_status = 0; - rsi_dbg(ISR_ZONE, - "Unknown Interrupt %x\n", - isr_status); - break; + if (status) { + rsi_dbg(ERR_ZONE, + "%s: Failed to read f/w reg\n", + __func__); + } else { + rsi_dbg(ERR_ZONE, + "%s: Firmware Status is 0x%x\n", + __func__, fw_status); + rsi_sdio_ack_intr(common->priv, + BIT(FW_ASSERT_IND)); } - isr_status ^= BIT(isr_type - 1); - } while (isr_status); + + common->fsm_state = FSM_CARD_NOT_READY; + + isr_status &= ~BIT(FW_ASSERT_IND); + } + + if (isr_status & BIT(MSDU_PKT_PENDING)) { + rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n"); + dev->rx_info.total_sdio_msdu_pending_intr++; + + status = rsi_process_pkt(common); + if (status) { + rsi_dbg(ERR_ZONE, "%s: Failed to read pkt\n", + __func__); + mutex_unlock(&common->rx_lock); + return; + } + + isr_status &= ~BIT(MSDU_PKT_PENDING); + } + + if (isr_status) { + rsi_sdio_ack_intr(common->priv, isr_status); + dev->rx_info.total_sdio_unknown_intr++; + 
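The rsi_rx_handler() rewrite above flattens a do/switch interrupt decoder into independent bit tests that clear each source as it is serviced. A compact sketch of the same structure; the bit positions are hypothetical, and this version logs the leftover bits before clearing them:

#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define INT_BUF_AVAIL	0
#define INT_FW_ASSERT	1
#define INT_PKT_PENDING	2

static void decode_isr(u8 isr)
{
	if (isr & BIT(INT_BUF_AVAIL)) {
		/* ... ack and kick the TX path ... */
		isr &= ~BIT(INT_BUF_AVAIL);
	}

	if (isr & BIT(INT_FW_ASSERT)) {
		/* ... capture firmware status, mark card not ready ... */
		isr &= ~BIT(INT_FW_ASSERT);
	}

	if (isr & BIT(INT_PKT_PENDING)) {
		/* ... drain RX ... */
		isr &= ~BIT(INT_PKT_PENDING);
	}

	/* Whatever is left was not claimed by any handler above. */
	if (isr)
		pr_warn("unknown interrupt bits %#x\n", isr);
}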
isr_status = 0; + rsi_dbg(ISR_ZONE, "Unknown Interrupt %x\n", + isr_status); + } + mutex_unlock(&common->rx_lock); } while (1); } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index a62d41c0ccbc..a4a533c2a783 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. * * Permission to use, copy, modify, and/or distribute this software for any @@ -75,7 +75,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter, * rsi_write_multiple() - This function writes multiple bytes of information * to the USB card. * @adapter: Pointer to the adapter structure. - * @addr: Address of the register. + * @endpoint: Type of endpoint. * @data: Pointer to the data that has to be written. * @count: Number of multiple bytes to be written. * @@ -313,6 +313,8 @@ static void rsi_rx_urb_kill(struct rsi_hw *adapter, u8 ep_num) /** * rsi_rx_urb_submit() - This function submits the given URB to the USB stack. * @adapter: Pointer to the adapter structure. + * @ep_num: Endpoint number. + * @mem_flags: The type of memory to allocate. * * Return: 0 on success, a negative error code on failure. */ @@ -741,24 +743,24 @@ static int rsi_reset_card(struct rsi_hw *adapter) if (ret < 0) goto fail; } else { - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_INTERRUPT_TIMER, - NWP_WWD_INT_TIMER_CLKS, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_INTERRUPT_TIMER, + NWP_WWD_INT_TIMER_CLKS, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_SYSTEM_RESET_TIMER, - NWP_WWD_SYS_RESET_TIMER_CLKS, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_SYSTEM_RESET_TIMER, + NWP_WWD_SYS_RESET_TIMER_CLKS, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } - if ((rsi_usb_master_reg_write(adapter, - NWP_WWD_MODE_AND_RSTART, - NWP_WWD_TIMER_DISABLE, - RSI_9116_REG_SIZE)) < 0) { + ret = rsi_usb_master_reg_write(adapter, + NWP_WWD_MODE_AND_RSTART, + NWP_WWD_TIMER_DISABLE, + RSI_9116_REG_SIZE); + if (ret < 0) goto fail; - } } rsi_dbg(INFO_ZONE, "Reset card done\n"); diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index b1687d22f73f..4ffcdde1acb1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -1,4 +1,4 @@ -/** +/* * Copyright (c) 2014 Redpine Signals Inc. 
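The rsi_reset_card() hunk above converts `if ((write(...)) < 0)` nesting into the kernel's usual capture-then-test form, which also stops discarding the errno. A minimal sketch with stand-in names:

#include <linux/errno.h>
#include <linux/types.h>

struct my_hw { int unused; };

/* Stand-in for the bus write (rsi_usb_master_reg_write() above);
 * returns 0 on success or a negative errno.
 */
static int my_reg_write(struct my_hw *hw, u32 reg, u32 val)
{
	return 0;
}

static int my_reset_card(struct my_hw *hw)
{
	int ret;

	ret = my_reg_write(hw, 0x100, 0x1);
	if (ret < 0)
		goto fail;

	ret = my_reg_write(hw, 0x104, 0x0);
	if (ret < 0)
		goto fail;

	return 0;
fail:
	return ret;	/* the real errno, not a synthesized one */
}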
* * Permission to use, copy, modify, and/or distribute this software for any diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 9afc1d0d2684..1c756263cf15 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -107,11 +107,6 @@ struct receive_info { u32 buf_available_counter; }; -struct rsi_sdio_rx_q { - u8 num_rx_pkts; - struct sk_buff_head head; -}; - struct rsi_91x_sdiodev { struct sdio_func *pfunction; struct task_struct *sdio_irq_task; @@ -124,11 +119,10 @@ struct rsi_91x_sdiodev { u16 tx_blk_size; u8 write_fail; bool buff_status_updated; - struct rsi_sdio_rx_q rx_q; struct rsi_thread rx_thread; + u8 pktbuffer[8192] __aligned(4); }; -void rsi_interrupt_handler(struct rsi_hw *adapter); int rsi_init_sdio_slave_regs(struct rsi_hw *adapter); int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data); int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length); diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c index 02efe8483cba..c364a3987618 100644 --- a/drivers/net/wireless/st/cw1200/bh.c +++ b/drivers/net/wireless/st/cw1200/bh.c @@ -85,7 +85,7 @@ int cw1200_register_bh(struct cw1200_common *priv) void cw1200_unregister_bh(struct cw1200_common *priv) { - atomic_add(1, &priv->bh_term); + atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); flush_workqueue(priv->bh_workqueue); @@ -107,7 +107,7 @@ void cw1200_irq_handler(struct cw1200_common *priv) if (/* WARN_ON */(priv->bh_error)) return; - if (atomic_add_return(1, &priv->bh_rx) == 1) + if (atomic_inc_return(&priv->bh_rx) == 1) wake_up(&priv->bh_wq); } EXPORT_SYMBOL_GPL(cw1200_irq_handler); @@ -120,7 +120,7 @@ void cw1200_bh_wakeup(struct cw1200_common *priv) return; } - if (atomic_add_return(1, &priv->bh_tx) == 1) + if (atomic_inc_return(&priv->bh_tx) == 1) wake_up(&priv->bh_wq); } @@ -382,7 +382,7 @@ static int cw1200_bh_tx_helper(struct cw1200_common *priv, BUG_ON(tx_len < sizeof(*wsm)); BUG_ON(__le16_to_cpu(wsm->len) != tx_len); - atomic_add(1, &priv->bh_tx); + atomic_inc(&priv->bh_tx); tx_len = priv->hwbus_ops->align_size( priv->hwbus_priv, tx_len); @@ -537,7 +537,7 @@ static int cw1200_bh(void *arg) pr_debug("[BH] Device resume.\n"); atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); wake_up(&priv->bh_evt_wq); - atomic_add(1, &priv->bh_rx); + atomic_inc(&priv->bh_rx); goto done; } diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c index f7fe56affbcd..326b1cc1d2bc 100644 --- a/drivers/net/wireless/st/cw1200/main.c +++ b/drivers/net/wireless/st/cw1200/main.c @@ -381,6 +381,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, CW1200_LINK_ID_MAX, cw1200_skb_dtor, priv)) { + destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } @@ -392,6 +393,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, for (; i > 0; i--) cw1200_queue_deinit(&priv->tx_queue[i - 1]); cw1200_queue_stats_deinit(&priv->tx_queue_stats); + destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index 400dd585916b..7de666b90ff5 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -650,7 +650,7 @@ cw1200_tx_h_rate_policy(struct cw1200_common *priv, wsm->flags |= t->txpriv.rate_id << 4; t->rate = cw1200_get_tx_rate(priv, - &t->tx_info->control.rates[0]), + 
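The cw1200 conversions below are mechanical: atomic_inc(v) for atomic_add(1, v), atomic_dec_return() for atomic_sub_return(1, ...). The _return variants matter because the driver wakes its bottom half only on the 0 -> 1 transition; a minimal sketch:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t bh_rx = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(bh_wq);

static void rx_kick(void)
{
	/*
	 * Only the first increment needs a wakeup; if the counter was
	 * already nonzero the bottom half is awake and will consume
	 * the additional work on its own.
	 */
	if (atomic_inc_return(&bh_rx) == 1)
		wake_up(&bh_wq);
}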
&t->tx_info->control.rates[0]); wsm->max_tx_rate = t->rate->hw_value; if (t->rate->flags & IEEE80211_TX_RC_MCS) { if (cw1200_ht_greenfield(&priv->ht_info)) diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c index d9b6147bbb52..99624dd34886 100644 --- a/drivers/net/wireless/st/cw1200/wsm.c +++ b/drivers/net/wireless/st/cw1200/wsm.c @@ -1139,7 +1139,7 @@ static int wsm_cmd_send(struct cw1200_common *priv, pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used); /* Kill BH thread to report the error to the top layer. */ - atomic_add(1, &priv->bh_term); + atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); ret = -ETIMEDOUT; } @@ -1160,7 +1160,7 @@ done: void wsm_lock_tx(struct cw1200_common *priv) { wsm_cmd_lock(priv); - if (atomic_add_return(1, &priv->tx_lock) == 1) { + if (atomic_inc_return(&priv->tx_lock) == 1) { if (wsm_flush_tx(priv)) pr_debug("[WSM] TX is locked.\n"); } @@ -1169,7 +1169,7 @@ void wsm_lock_tx(struct cw1200_common *priv) void wsm_lock_tx_async(struct cw1200_common *priv) { - if (atomic_add_return(1, &priv->tx_lock) == 1) + if (atomic_inc_return(&priv->tx_lock) == 1) pr_debug("[WSM] TX is locked (async).\n"); } @@ -1223,7 +1223,7 @@ bool wsm_flush_tx(struct cw1200_common *priv) void wsm_unlock_tx(struct cw1200_common *priv) { int tx_lock; - tx_lock = atomic_sub_return(1, &priv->tx_lock); + tx_lock = atomic_dec_return(&priv->tx_lock); BUG_ON(tx_lock < 0); if (tx_lock == 0) { diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c index 9547aea01b0f..e1095b8de2bd 100644 --- a/drivers/net/wireless/ti/wl1251/cmd.c +++ b/drivers/net/wireless/ti/wl1251/cmd.c @@ -63,7 +63,7 @@ out: * * @wl: wl struct * @buf: buffer containing the command, with all headers, must work with dma - * @len: length of the buffer + * @buf_len: length of the buffer * @answer: is answer needed */ int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer) diff --git a/drivers/net/wireless/ti/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c index d48746e640cc..a1b778a0fda0 100644 --- a/drivers/net/wireless/ti/wl1251/debugfs.c +++ b/drivers/net/wireless/ti/wl1251/debugfs.c @@ -39,7 +39,7 @@ static const struct file_operations name## _ops = { \ #define DEBUGFS_ADD(name, parent) \ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ - wl, &name## _ops); \ + wl, &name## _ops) \ #define DEBUGFS_DEL(name) \ do { \ diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 6863fd552d5e..122c7a4b374f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -2227,7 +2227,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_CLIENT: wlvif->p2p = 1; - /* fall-through */ + fallthrough; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_DEVICE: wlvif->bss_type = BSS_TYPE_STA_BSS; @@ -2237,7 +2237,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) break; case NL80211_IFTYPE_P2P_GO: wlvif->p2p = 1; - /* fall-through */ + fallthrough; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: wlvif->bss_type = BSS_TYPE_AP_BSS; diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 18c4d998ce4b..f26fc150ecd0 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -391,7 +391,7 @@ static int wl12xx_spi_set_power(struct device *child, 
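The wlcore hunks above (and the zd1201 one below) replace /* fall-through */ comments with the fallthrough pseudo-keyword, which -Wimplicit-fallthrough can actually verify. A sketch of the pattern with hypothetical case values:

#include <linux/compiler_attributes.h>

static int bss_type_for(int iftype)
{
	int p2p = 0;

	switch (iftype) {
	case 2:			/* p2p client */
		p2p = 1;
		fallthrough;	/* deliberate: shares the station path */
	case 1:			/* station */
		return p2p ? 2 : 1;
	default:
		return 0;
	}
}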
bool enable) return ret; } -/** +/* * wl12xx_spi_set_block_size * * This function is not needed for spi mode, but need to be present. @@ -431,7 +431,6 @@ MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table); /** * wlcore_probe_of - DT node parsing. * @spi: SPI slave device parameters. - * @res: resource parameters. * @glue: wl12xx SPI bus to slave device glue parameters. * @pdev_data: wlcore device parameters */ diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index 7ac1814182ba..5cf0379b88b6 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -100,7 +100,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct wl1271 *wl = dev_get_drvdata(dev); ssize_t len; int ret; diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 026e88b80bfc..8ca5789c7b37 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -134,7 +134,7 @@ static const struct { /** * iw_valid_channel - validate channel in regulatory domain - * @reg_comain: regulatory domain + * @reg_domain: regulatory domain * @channel: channel to validate * * Returns 0 if invalid in the specified regulatory domain, non-zero if valid. @@ -458,11 +458,9 @@ out: /** * wl3501_send_pkt - Send a packet. * @this: Card - * - * Send a packet. - * - * data = Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr, + * @data: Ethernet raw frame. (e.g. data[0] - data[5] is Dest MAC Addr, * data[6] - data[11] is Src MAC Addr) + * @len: Packet length * Ref: IEEE 802.11 */ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len) diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 718c4ee865ba..097805b55c59 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -966,7 +966,7 @@ static int zd1201_set_mode(struct net_device *dev, */ zd1201_join(zd, "\0-*#\0", 5); /* Put port in pIBSS */ - /* Fall through */ + fallthrough; case 8: /* No pseudo-IBSS in wireless extensions (yet) */ porttype = ZD1201_PORTTYPE_PSEUDOIBSS; break; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 66367ab7e4c1..5c4cd0e1adeb 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -1711,11 +1711,6 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, count, USB_MAX_IOREAD16_COUNT); return -EINVAL; } - if (in_atomic()) { - dev_dbg_f(zd_usb_dev(usb), - "error: io in atomic context not supported\n"); - return -EWOULDBLOCK; - } if (!usb_int_enabled(usb)) { dev_dbg_f(zd_usb_dev(usb), "error: usb interrupt not enabled\n"); @@ -1882,11 +1877,6 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, count, USB_MAX_IOWRITE16_COUNT); return -EINVAL; } - if (in_atomic()) { - dev_dbg_f(zd_usb_dev(usb), - "error: io in atomic context not supported\n"); - return -EWOULDBLOCK; - } udev = zd_usb_to_usbdev(usb); @@ -1966,11 +1956,6 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) int i, req_len, actual_req_len; u16 bit_value_template; - if (in_atomic()) { - dev_dbg_f(zd_usb_dev(usb), - "error: io in atomic context not supported\n"); - return -EWOULDBLOCK; - } if (bits < 
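The wlcore sysfs hunk above replaces an open-coded container_of() with kobj_to_dev(). Typical use inside a binary attribute callback; a sketch where my_priv is a hypothetical driver structure:

#include <linux/device.h>
#include <linux/sysfs.h>

struct my_priv;

static ssize_t fwlog_read(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *attr, char *buf,
			  loff_t pos, size_t count)
{
	/* kobj_to_dev() == container_of(kobj, struct device, kobj) */
	struct device *dev = kobj_to_dev(kobj);
	struct my_priv *priv = dev_get_drvdata(dev);

	/* ... copy at most count bytes of log data into buf ... */
	return 0;
}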
USB_MIN_RFWRITE_BIT_COUNT) { dev_dbg_f(zd_usb_dev(usb), "error: bits %d are smaller than" diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 3e9895bec15f..b01848ef4649 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -2014,7 +2014,7 @@ static int xennet_create_page_pool(struct netfront_queue *queue) } err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, - queue->id); + queue->id, 0); if (err) { netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); goto err_free_pp; @@ -2413,12 +2413,11 @@ static ssize_t store_rxbuf(struct device *dev, const char *buf, size_t len) { char *endp; - unsigned long target; if (!capable(CAP_NET_ADMIN)) return -EPERM; - target = simple_strtoul(buf, &endp, 0); + simple_strtoul(buf, &endp, 0); if (endp == buf) return -EBADMSG;
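The xen-netfront hunk above reflects xdp_rxq_info_reg() growing a napi_id parameter (xen-netfront passes 0, having no NAPI instance to associate for busy polling). Registration then looks roughly like this sketch, with my_xdp_rxq_setup as a hypothetical helper:

#include <net/xdp.h>

static int my_xdp_rxq_setup(struct net_device *dev,
			    struct xdp_rxq_info *rxq, u32 qid)
{
	int err;

	/* Last argument is the napi_id used by busy polling; 0 when
	 * the queue is not serviced by a NAPI context.
	 */
	err = xdp_rxq_info_reg(rxq, dev, qid, 0);
	if (err)
		return err;

	err = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (err)
		xdp_rxq_info_unreg(rxq);
	return err;
}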